blob: eb5205e487333e1f838d218c27f9b00b27f86b3c [file] [log] [blame]
Steven Rostedt (VMware)bcea3f92018-08-16 11:23:53 -04001// SPDX-License-Identifier: GPL-2.0
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002/*
3 * ring buffer based function tracer
4 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010013 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020014 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050015#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020016#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050017#include <linux/stacktrace.h>
18#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020019#include <linux/kallsyms.h>
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -040020#include <linux/security.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020021#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040022#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050023#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050025#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020026#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020027#include <linux/hardirq.h>
28#include <linux/linkage.h>
29#include <linux/uaccess.h>
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -040030#include <linux/vmalloc.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020031#include <linux/ftrace.h>
32#include <linux/module.h>
33#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050034#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040035#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010036#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050037#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080038#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090039#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/ctype.h>
41#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020042#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050043#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020044#include <linux/fs.h>
Chunyan Zhang478409d2016-11-21 15:57:18 +080045#include <linux/trace.h>
Chris Wilson3fd49c92018-03-30 16:01:31 +010046#include <linux/sched/clock.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060047#include <linux/sched/rt.h>
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +020048#include <linux/fsnotify.h>
49#include <linux/irq_work.h>
50#include <linux/workqueue.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020051
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020052#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050053#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020054
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010055/*
Steven Rostedt73c51622009-03-11 13:42:01 -040056 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
58 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050059bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040060
61/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010062 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010063 * A selftest will lurk into the ring-buffer to count the
64 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010065 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010066 * at the same time, giving false positive or negative results.
67 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010068static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010069
Steven Rostedtb2821ae2009-02-02 21:38:32 -050070/*
Masami Hiramatsu60efe212020-12-08 17:54:09 +090071 * If boot-time tracing including tracers/events via kernel cmdline
72 * is running, we do not want to run SELFTEST.
Steven Rostedtb2821ae2009-02-02 21:38:32 -050073 */
Li Zefan020e5f82009-07-01 10:47:05 +080074bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050075
Masami Hiramatsu60efe212020-12-08 17:54:09 +090076#ifdef CONFIG_FTRACE_STARTUP_TEST
77void __init disable_tracing_selftest(const char *reason)
78{
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
82 }
83}
84#endif
85
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050086/* Pipe tracepoints to printk */
87struct trace_iterator *tracepoint_print_iter;
88int tracepoint_printk;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -050089static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050090
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010091/* For tracers that don't implement custom flags */
92static struct tracer_opt dummy_tracer_opt[] = {
93 { }
94};
95
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050096static int
97dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010098{
99 return 0;
100}
Steven Rostedt0f048702008-11-05 16:05:44 -0500101
102/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -0400103 * To prevent the comm cache from being overwritten when no
104 * tracing is active, only save the comm when a trace event
105 * occurred.
106 */
Joel Fernandesd914ba32017-06-26 19:01:55 -0700107static DEFINE_PER_CPU(bool, trace_taskinfo_save);
Steven Rostedt7ffbd482012-10-11 12:14:25 -0400108
109/*
Steven Rostedt0f048702008-11-05 16:05:44 -0500110 * Kill all tracing for good (never come back).
111 * It is initialized to 1 but will turn to zero if the initialization
112 * of the tracer is successful. But that is the only place that sets
113 * this back to zero.
114 */
Hannes Eder4fd27352009-02-10 19:44:12 +0100115static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500116
Jason Wessel955b61e2010-08-05 09:22:23 -0500117cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200118
Steven Rostedt944ac422008-10-23 19:26:08 -0400119/*
120 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
121 *
122 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123 * is set, then ftrace_dump is called. This will output the contents
124 * of the ftrace buffers to the console. This is very useful for
125 * capturing traces that lead to crashes and outputing it to a
126 * serial console.
127 *
128 * It is default off, but you can enable it with either specifying
129 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200130 * /proc/sys/kernel/ftrace_dump_on_oops
131 * Set 1 if you want to dump buffers of all CPUs
132 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400133 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200134
135enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400136
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400137/* When set, tracing will stop when a WARN*() is hit */
138int __disable_trace_on_warning;
139
Jeremy Linton681bec02017-05-31 16:56:53 -0500140#ifdef CONFIG_TRACE_EVAL_MAP_FILE
141/* Map of enums to their values, for "eval_map" file */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500142struct trace_eval_map_head {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400143 struct module *mod;
144 unsigned long length;
145};
146
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500147union trace_eval_map_item;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400148
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500149struct trace_eval_map_tail {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400150 /*
151 * "end" is first and points to NULL as it must be different
Jeremy Linton00f4b652017-05-31 16:56:43 -0500152 * than "mod" or "eval_string"
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400153 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500154 union trace_eval_map_item *next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400155 const char *end; /* points to NULL */
156};
157
Jeremy Linton1793ed92017-05-31 16:56:46 -0500158static DEFINE_MUTEX(trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400159
160/*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500161 * The trace_eval_maps are saved in an array with two extra elements,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400162 * one at the beginning, and one at the end. The beginning item contains
163 * the count of the saved maps (head.length), and the module they
164 * belong to if not built in (head.mod). The ending item contains a
Jeremy Linton681bec02017-05-31 16:56:53 -0500165 * pointer to the next array of saved eval_map items.
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400166 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500167union trace_eval_map_item {
Jeremy Linton00f4b652017-05-31 16:56:43 -0500168 struct trace_eval_map map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500169 struct trace_eval_map_head head;
170 struct trace_eval_map_tail tail;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400171};
172
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500173static union trace_eval_map_item *trace_eval_maps;
Jeremy Linton681bec02017-05-31 16:56:53 -0500174#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400175
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +0900176int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500177static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +0200178 unsigned long flags, int pc);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500179
Li Zefanee6c2c12009-09-18 14:06:47 +0800180#define MAX_TRACER_SIZE 100
181static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500182static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100183
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500184static bool allocate_snapshot;
185
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200186static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100187{
Chen Gang67012ab2013-04-08 12:06:44 +0800188 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500189 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400190 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500191 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100192 return 1;
193}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200194__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100195
Steven Rostedt944ac422008-10-23 19:26:08 -0400196static int __init set_ftrace_dump_on_oops(char *str)
197{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200198 if (*str++ != '=' || !*str) {
199 ftrace_dump_on_oops = DUMP_ALL;
200 return 1;
201 }
202
203 if (!strcmp("orig_cpu", str)) {
204 ftrace_dump_on_oops = DUMP_ORIG;
205 return 1;
206 }
207
208 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400209}
210__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200211
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400212static int __init stop_trace_on_warning(char *str)
213{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200214 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
215 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400216 return 1;
217}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200218__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400219
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400220static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500221{
222 allocate_snapshot = true;
223 /* We also need the main ring buffer expanded */
224 ring_buffer_expanded = true;
225 return 1;
226}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400227__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500228
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400229
230static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400231
232static int __init set_trace_boot_options(char *str)
233{
Chen Gang67012ab2013-04-08 12:06:44 +0800234 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400235 return 0;
236}
237__setup("trace_options=", set_trace_boot_options);
238
Steven Rostedte1e232c2014-02-10 23:38:46 -0500239static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
240static char *trace_boot_clock __initdata;
241
242static int __init set_trace_boot_clock(char *str)
243{
244 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
245 trace_boot_clock = trace_boot_clock_buf;
246 return 0;
247}
248__setup("trace_clock=", set_trace_boot_clock);
249
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500250static int __init set_tracepoint_printk(char *str)
251{
252 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
253 tracepoint_printk = 1;
254 return 1;
255}
256__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400257
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100258unsigned long long ns2usecs(u64 nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200259{
260 nsec += 500;
261 do_div(nsec, 1000);
262 return nsec;
263}
264
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +0300265static void
266trace_process_export(struct trace_export *export,
267 struct ring_buffer_event *event, int flag)
268{
269 struct trace_entry *entry;
270 unsigned int size = 0;
271
272 if (export->flags & flag) {
273 entry = ring_buffer_event_data(event);
274 size = ring_buffer_event_length(event);
275 export->write(export, entry, size);
276 }
277}
278
279static DEFINE_MUTEX(ftrace_export_lock);
280
281static struct trace_export __rcu *ftrace_exports_list __read_mostly;
282
283static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
284static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
Tingwei Zhang458999c2020-10-05 10:13:15 +0300285static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +0300286
287static inline void ftrace_exports_enable(struct trace_export *export)
288{
289 if (export->flags & TRACE_EXPORT_FUNCTION)
290 static_branch_inc(&trace_function_exports_enabled);
291
292 if (export->flags & TRACE_EXPORT_EVENT)
293 static_branch_inc(&trace_event_exports_enabled);
Tingwei Zhang458999c2020-10-05 10:13:15 +0300294
295 if (export->flags & TRACE_EXPORT_MARKER)
296 static_branch_inc(&trace_marker_exports_enabled);
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +0300297}
298
299static inline void ftrace_exports_disable(struct trace_export *export)
300{
301 if (export->flags & TRACE_EXPORT_FUNCTION)
302 static_branch_dec(&trace_function_exports_enabled);
303
304 if (export->flags & TRACE_EXPORT_EVENT)
305 static_branch_dec(&trace_event_exports_enabled);
Tingwei Zhang458999c2020-10-05 10:13:15 +0300306
307 if (export->flags & TRACE_EXPORT_MARKER)
308 static_branch_dec(&trace_marker_exports_enabled);
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +0300309}
310
311static void ftrace_exports(struct ring_buffer_event *event, int flag)
312{
313 struct trace_export *export;
314
315 preempt_disable_notrace();
316
317 export = rcu_dereference_raw_check(ftrace_exports_list);
318 while (export) {
319 trace_process_export(export, event, flag);
320 export = rcu_dereference_raw_check(export->next);
321 }
322
323 preempt_enable_notrace();
324}
325
326static inline void
327add_trace_export(struct trace_export **list, struct trace_export *export)
328{
329 rcu_assign_pointer(export->next, *list);
330 /*
331 * We are entering export into the list but another
332 * CPU might be walking that list. We need to make sure
333 * the export->next pointer is valid before another CPU sees
334 * the export pointer included into the list.
335 */
336 rcu_assign_pointer(*list, export);
337}
338
339static inline int
340rm_trace_export(struct trace_export **list, struct trace_export *export)
341{
342 struct trace_export **p;
343
344 for (p = list; *p != NULL; p = &(*p)->next)
345 if (*p == export)
346 break;
347
348 if (*p != export)
349 return -1;
350
351 rcu_assign_pointer(*p, (*p)->next);
352
353 return 0;
354}
355
356static inline void
357add_ftrace_export(struct trace_export **list, struct trace_export *export)
358{
359 ftrace_exports_enable(export);
360
361 add_trace_export(list, export);
362}
363
364static inline int
365rm_ftrace_export(struct trace_export **list, struct trace_export *export)
366{
367 int ret;
368
369 ret = rm_trace_export(list, export);
370 ftrace_exports_disable(export);
371
372 return ret;
373}
374
375int register_ftrace_export(struct trace_export *export)
376{
377 if (WARN_ON_ONCE(!export->write))
378 return -1;
379
380 mutex_lock(&ftrace_export_lock);
381
382 add_ftrace_export(&ftrace_exports_list, export);
383
384 mutex_unlock(&ftrace_export_lock);
385
386 return 0;
387}
388EXPORT_SYMBOL_GPL(register_ftrace_export);
389
390int unregister_ftrace_export(struct trace_export *export)
391{
392 int ret;
393
394 mutex_lock(&ftrace_export_lock);
395
396 ret = rm_ftrace_export(&ftrace_exports_list, export);
397
398 mutex_unlock(&ftrace_export_lock);
399
400 return ret;
401}
402EXPORT_SYMBOL_GPL(unregister_ftrace_export);
403
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400404/* trace_flags holds trace_options default values */
405#define TRACE_DEFAULT_FLAGS \
406 (FUNCTION_DEFAULT_FLAGS | \
407 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
408 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
409 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
410 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
411
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400412/* trace_options that are only supported by global_trace */
413#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
414 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
415
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400416/* trace_flags that are default zero for instances */
417#define ZEROED_TRACE_FLAGS \
Namhyung Kim1e104862017-04-17 11:44:28 +0900418 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400419
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200420/*
Joel Fernandes67d04bb2017-02-16 20:10:58 -0800421 * The global_trace is the descriptor that holds the top-level tracing
422 * buffers for the live tracing.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200423 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400424static struct trace_array global_trace = {
425 .trace_flags = TRACE_DEFAULT_FLAGS,
426};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200427
Steven Rostedtae63b31e2012-05-03 23:09:03 -0400428LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200429
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400430int trace_array_get(struct trace_array *this_tr)
431{
432 struct trace_array *tr;
433 int ret = -ENODEV;
434
435 mutex_lock(&trace_types_lock);
436 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
437 if (tr == this_tr) {
438 tr->ref++;
439 ret = 0;
440 break;
441 }
442 }
443 mutex_unlock(&trace_types_lock);
444
445 return ret;
446}
447
448static void __trace_array_put(struct trace_array *this_tr)
449{
450 WARN_ON(!this_tr->ref);
451 this_tr->ref--;
452}
453
Divya Indi28879782019-11-20 11:08:38 -0800454/**
455 * trace_array_put - Decrement the reference counter for this trace array.
456 *
457 * NOTE: Use this when we no longer need the trace array returned by
458 * trace_array_get_by_name(). This ensures the trace array can be later
459 * destroyed.
460 *
461 */
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400462void trace_array_put(struct trace_array *this_tr)
463{
Divya Indi28879782019-11-20 11:08:38 -0800464 if (!this_tr)
465 return;
466
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400467 mutex_lock(&trace_types_lock);
468 __trace_array_put(this_tr);
469 mutex_unlock(&trace_types_lock);
470}
Divya Indi28879782019-11-20 11:08:38 -0800471EXPORT_SYMBOL_GPL(trace_array_put);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400472
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -0400473int tracing_check_open_get_tr(struct trace_array *tr)
474{
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -0400475 int ret;
476
477 ret = security_locked_down(LOCKDOWN_TRACEFS);
478 if (ret)
479 return ret;
480
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -0400481 if (tracing_disabled)
482 return -ENODEV;
483
484 if (tr && trace_array_get(tr) < 0)
485 return -ENODEV;
486
487 return 0;
488}
489
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400490int call_filter_check_discard(struct trace_event_call *call, void *rec,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500491 struct trace_buffer *buffer,
Tom Zanussif306cc82013-10-24 08:34:17 -0500492 struct ring_buffer_event *event)
493{
494 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
495 !filter_match_preds(call->filter, rec)) {
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -0400496 __trace_event_discard_commit(buffer, event);
Tom Zanussif306cc82013-10-24 08:34:17 -0500497 return 1;
498 }
499
500 return 0;
501}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500502
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400503void trace_free_pid_list(struct trace_pid_list *pid_list)
504{
505 vfree(pid_list->pids);
506 kfree(pid_list);
507}
508
Steven Rostedtd8275c42016-04-14 12:15:22 -0400509/**
510 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
511 * @filtered_pids: The list of pids to check
512 * @search_pid: The PID to find in @filtered_pids
513 *
514 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
515 */
516bool
517trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
518{
519 /*
520 * If pid_max changed after filtered_pids was created, we
521 * by default ignore all pids greater than the previous pid_max.
522 */
523 if (search_pid >= filtered_pids->pid_max)
524 return false;
525
526 return test_bit(search_pid, filtered_pids->pids);
527}
528
529/**
530 * trace_ignore_this_task - should a task be ignored for tracing
531 * @filtered_pids: The list of pids to check
532 * @task: The task that should be ignored if not filtered
533 *
534 * Checks if @task should be traced or not from @filtered_pids.
535 * Returns true if @task should *NOT* be traced.
536 * Returns false if @task should be traced.
537 */
538bool
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -0400539trace_ignore_this_task(struct trace_pid_list *filtered_pids,
540 struct trace_pid_list *filtered_no_pids,
541 struct task_struct *task)
Steven Rostedtd8275c42016-04-14 12:15:22 -0400542{
543 /*
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -0400544 * If filterd_no_pids is not empty, and the task's pid is listed
545 * in filtered_no_pids, then return true.
546 * Otherwise, if filtered_pids is empty, that means we can
547 * trace all tasks. If it has content, then only trace pids
548 * within filtered_pids.
Steven Rostedtd8275c42016-04-14 12:15:22 -0400549 */
Steven Rostedtd8275c42016-04-14 12:15:22 -0400550
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -0400551 return (filtered_pids &&
552 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
553 (filtered_no_pids &&
554 trace_find_filtered_pid(filtered_no_pids, task->pid));
Steven Rostedtd8275c42016-04-14 12:15:22 -0400555}
556
557/**
Matthias Kaehlckef08367b2019-05-23 12:26:28 -0700558 * trace_filter_add_remove_task - Add or remove a task from a pid_list
Steven Rostedtd8275c42016-04-14 12:15:22 -0400559 * @pid_list: The list to modify
560 * @self: The current task for fork or NULL for exit
561 * @task: The task to add or remove
562 *
563 * If adding a task, if @self is defined, the task is only added if @self
564 * is also included in @pid_list. This happens on fork and tasks should
565 * only be added when the parent is listed. If @self is NULL, then the
566 * @task pid will be removed from the list, which would happen on exit
567 * of a task.
568 */
569void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
570 struct task_struct *self,
571 struct task_struct *task)
572{
573 if (!pid_list)
574 return;
575
576 /* For forks, we only add if the forking task is listed */
577 if (self) {
578 if (!trace_find_filtered_pid(pid_list, self->pid))
579 return;
580 }
581
582 /* Sorry, but we don't support pid_max changing after setting */
583 if (task->pid >= pid_list->pid_max)
584 return;
585
586 /* "self" is set for forks, and NULL for exits */
587 if (self)
588 set_bit(task->pid, pid_list->pids);
589 else
590 clear_bit(task->pid, pid_list->pids);
591}
592
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400593/**
594 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
595 * @pid_list: The pid list to show
596 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
597 * @pos: The position of the file
598 *
599 * This is used by the seq_file "next" operation to iterate the pids
600 * listed in a trace_pid_list structure.
601 *
602 * Returns the pid+1 as we want to display pid of zero, but NULL would
603 * stop the iteration.
604 */
605void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
606{
607 unsigned long pid = (unsigned long)v;
608
609 (*pos)++;
610
611 /* pid already is +1 of the actual prevous bit */
612 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
613
614 /* Return pid + 1 to allow zero to be represented */
615 if (pid < pid_list->pid_max)
616 return (void *)(pid + 1);
617
618 return NULL;
619}
620
621/**
622 * trace_pid_start - Used for seq_file to start reading pid lists
623 * @pid_list: The pid list to show
624 * @pos: The position of the file
625 *
626 * This is used by seq_file "start" operation to start the iteration
627 * of listing pids.
628 *
629 * Returns the pid+1 as we want to display pid of zero, but NULL would
630 * stop the iteration.
631 */
632void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
633{
634 unsigned long pid;
635 loff_t l = 0;
636
637 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
638 if (pid >= pid_list->pid_max)
639 return NULL;
640
641 /* Return pid + 1 so that zero can be the exit value */
642 for (pid++; pid && l < *pos;
643 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
644 ;
645 return (void *)pid;
646}
647
648/**
649 * trace_pid_show - show the current pid in seq_file processing
650 * @m: The seq_file structure to write into
651 * @v: A void pointer of the pid (+1) value to display
652 *
653 * Can be directly used by seq_file operations to display the current
654 * pid value.
655 */
656int trace_pid_show(struct seq_file *m, void *v)
657{
658 unsigned long pid = (unsigned long)v - 1;
659
660 seq_printf(m, "%lu\n", pid);
661 return 0;
662}
663
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400664/* 128 should be much more than enough */
665#define PID_BUF_SIZE 127
666
667int trace_pid_write(struct trace_pid_list *filtered_pids,
668 struct trace_pid_list **new_pid_list,
669 const char __user *ubuf, size_t cnt)
670{
671 struct trace_pid_list *pid_list;
672 struct trace_parser parser;
673 unsigned long val;
674 int nr_pids = 0;
675 ssize_t read = 0;
676 ssize_t ret = 0;
677 loff_t pos;
678 pid_t pid;
679
680 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
681 return -ENOMEM;
682
683 /*
684 * Always recreate a new array. The write is an all or nothing
685 * operation. Always create a new array when adding new pids by
686 * the user. If the operation fails, then the current list is
687 * not modified.
688 */
689 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
Wenwen Wang91862cc2019-04-19 21:22:59 -0500690 if (!pid_list) {
691 trace_parser_put(&parser);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400692 return -ENOMEM;
Wenwen Wang91862cc2019-04-19 21:22:59 -0500693 }
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400694
695 pid_list->pid_max = READ_ONCE(pid_max);
696
697 /* Only truncating will shrink pid_max */
698 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
699 pid_list->pid_max = filtered_pids->pid_max;
700
701 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
702 if (!pid_list->pids) {
Wenwen Wang91862cc2019-04-19 21:22:59 -0500703 trace_parser_put(&parser);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400704 kfree(pid_list);
705 return -ENOMEM;
706 }
707
708 if (filtered_pids) {
709 /* copy the current bits to the new max */
Wei Yongjun67f20b02016-07-04 15:10:04 +0000710 for_each_set_bit(pid, filtered_pids->pids,
711 filtered_pids->pid_max) {
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400712 set_bit(pid, pid_list->pids);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400713 nr_pids++;
714 }
715 }
716
717 while (cnt > 0) {
718
719 pos = 0;
720
721 ret = trace_get_user(&parser, ubuf, cnt, &pos);
722 if (ret < 0 || !trace_parser_loaded(&parser))
723 break;
724
725 read += ret;
726 ubuf += ret;
727 cnt -= ret;
728
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400729 ret = -EINVAL;
730 if (kstrtoul(parser.buffer, 0, &val))
731 break;
732 if (val >= pid_list->pid_max)
733 break;
734
735 pid = (pid_t)val;
736
737 set_bit(pid, pid_list->pids);
738 nr_pids++;
739
740 trace_parser_clear(&parser);
741 ret = 0;
742 }
743 trace_parser_put(&parser);
744
745 if (ret < 0) {
746 trace_free_pid_list(pid_list);
747 return ret;
748 }
749
750 if (!nr_pids) {
751 /* Cleared the list of pids */
752 trace_free_pid_list(pid_list);
753 read = ret;
754 pid_list = NULL;
755 }
756
757 *new_pid_list = pid_list;
758
759 return read;
760}
761
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500762static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400763{
764 u64 ts;
765
766 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700767 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400768 return trace_clock_local();
769
Alexander Z Lam94571582013-08-02 18:36:16 -0700770 ts = ring_buffer_time_stamp(buf->buffer, cpu);
771 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400772
773 return ts;
774}
775
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100776u64 ftrace_now(int cpu)
Alexander Z Lam94571582013-08-02 18:36:16 -0700777{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500778 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
Alexander Z Lam94571582013-08-02 18:36:16 -0700779}
780
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400781/**
782 * tracing_is_enabled - Show if global_trace has been disabled
783 *
784 * Shows if the global trace has been enabled or not. It uses the
785 * mirror flag "buffer_disabled" to be used in fast paths such as for
786 * the irqsoff tracer. But it may be inaccurate due to races. If you
787 * need to know the accurate state, use tracing_is_on() which is a little
788 * slower, but accurate.
789 */
Steven Rostedt90369902008-11-05 16:05:44 -0500790int tracing_is_enabled(void)
791{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400792 /*
793 * For quick access (irqsoff uses this in fast path), just
794 * return the mirror variable of the state of the ring buffer.
795 * It's a little racy, but we don't really care.
796 */
797 smp_rmb();
798 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500799}
800
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200801/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400802 * trace_buf_size is the size in bytes that is allocated
803 * for a buffer. Note, the number of bytes is always rounded
804 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400805 *
806 * This number is purposely set to a low number of 16384.
807 * If the dump on oops happens, it will be much appreciated
808 * to not have to wait for all that output. Anyway this can be
809 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200810 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400811#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400812
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400813static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200814
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200815/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200816static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200817
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200818/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200819 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200820 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700821DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200822
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800823/*
824 * serialize the access of the ring buffer
825 *
826 * ring buffer serializes readers, but it is low level protection.
827 * The validity of the events (which returns by ring_buffer_peek() ..etc)
828 * are not protected by ring buffer.
829 *
830 * The content of events may become garbage if we allow other process consumes
831 * these events concurrently:
832 * A) the page of the consumed events may become a normal page
833 * (not reader page) in ring buffer, and this page will be rewrited
834 * by events producer.
835 * B) The page of the consumed events may become a page for splice_read,
836 * and this page will be returned to system.
837 *
838 * These primitives allow multi process access to different cpu ring buffer
839 * concurrently.
840 *
841 * These primitives don't distinguish read-only and read-consume access.
842 * Multi read-only access are also serialized.
843 */
844
845#ifdef CONFIG_SMP
846static DECLARE_RWSEM(all_cpu_access_lock);
847static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
848
849static inline void trace_access_lock(int cpu)
850{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500851 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800852 /* gain it for accessing the whole ring buffer. */
853 down_write(&all_cpu_access_lock);
854 } else {
855 /* gain it for accessing a cpu ring buffer. */
856
Steven Rostedtae3b5092013-01-23 15:22:59 -0500857 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800858 down_read(&all_cpu_access_lock);
859
860 /* Secondly block other access to this @cpu ring buffer. */
861 mutex_lock(&per_cpu(cpu_access_lock, cpu));
862 }
863}
864
865static inline void trace_access_unlock(int cpu)
866{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500867 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800868 up_write(&all_cpu_access_lock);
869 } else {
870 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
871 up_read(&all_cpu_access_lock);
872 }
873}
874
875static inline void trace_access_lock_init(void)
876{
877 int cpu;
878
879 for_each_possible_cpu(cpu)
880 mutex_init(&per_cpu(cpu_access_lock, cpu));
881}
882
883#else
884
885static DEFINE_MUTEX(access_lock);
886
887static inline void trace_access_lock(int cpu)
888{
889 (void)cpu;
890 mutex_lock(&access_lock);
891}
892
893static inline void trace_access_unlock(int cpu)
894{
895 (void)cpu;
896 mutex_unlock(&access_lock);
897}
898
899static inline void trace_access_lock_init(void)
900{
901}
902
903#endif
904
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400905#ifdef CONFIG_STACKTRACE
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500906static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400907 unsigned long flags,
908 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400909static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500910 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400911 unsigned long flags,
912 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400913
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400914#else
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500915static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400916 unsigned long flags,
917 int skip, int pc, struct pt_regs *regs)
918{
919}
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400920static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500921 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400922 unsigned long flags,
923 int skip, int pc, struct pt_regs *regs)
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400924{
925}
926
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400927#endif
928
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500929static __always_inline void
930trace_event_setup(struct ring_buffer_event *event,
931 int type, unsigned long flags, int pc)
932{
933 struct trace_entry *ent = ring_buffer_event_data(event);
934
Cong Wang46710f32019-05-25 09:57:59 -0700935 tracing_generic_entry_update(ent, type, flags, pc);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500936}
937
938static __always_inline struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500939__trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500940 int type,
941 unsigned long len,
942 unsigned long flags, int pc)
943{
944 struct ring_buffer_event *event;
945
946 event = ring_buffer_lock_reserve(buffer, len);
947 if (event != NULL)
948 trace_event_setup(event, type, flags, pc);
949
950 return event;
951}
952
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400953void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400954{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500955 if (tr->array_buffer.buffer)
956 ring_buffer_record_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400957 /*
958 * This flag is looked at when buffers haven't been allocated
959 * yet, or by some tracers (like irqsoff), that just want to
960 * know if the ring buffer has been disabled, but it can handle
961 * races of where it gets disabled but we still do a record.
962 * As the check is in the fast path of the tracers, it is more
963 * important to be fast than accurate.
964 */
965 tr->buffer_disabled = 0;
966 /* Make the flag seen by readers */
967 smp_wmb();
968}
969
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200970/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500971 * tracing_on - enable tracing buffers
972 *
973 * This function enables tracing buffers that may have been
974 * disabled with tracing_off.
975 */
976void tracing_on(void)
977{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400978 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500979}
980EXPORT_SYMBOL_GPL(tracing_on);
981
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500982
983static __always_inline void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500984__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500985{
Joel Fernandesd914ba32017-06-26 19:01:55 -0700986 __this_cpu_write(trace_taskinfo_save, true);
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500987
988 /* If this is the temp buffer, we need to commit fully */
989 if (this_cpu_read(trace_buffered_event) == event) {
990 /* Length is in event->array[0] */
991 ring_buffer_write(buffer, event->array[0], &event->array[1]);
992 /* Release the temp buffer */
993 this_cpu_dec(trace_buffered_event_cnt);
994 } else
995 ring_buffer_unlock_commit(buffer, event);
996}
997
Steven Rostedt499e5472012-02-22 15:50:28 -0500998/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500999 * __trace_puts - write a constant string into the trace buffer.
1000 * @ip: The address of the caller
1001 * @str: The constant string to write
1002 * @size: The size of the string.
1003 */
1004int __trace_puts(unsigned long ip, const char *str, int size)
1005{
1006 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05001007 struct trace_buffer *buffer;
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001008 struct print_entry *entry;
1009 unsigned long irq_flags;
1010 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +08001011 int pc;
1012
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04001013 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +08001014 return 0;
1015
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +08001016 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001017
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -05001018 if (unlikely(tracing_selftest_running || tracing_disabled))
1019 return 0;
1020
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001021 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1022
1023 local_save_flags(irq_flags);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001024 buffer = global_trace.array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001025 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05001026 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1027 irq_flags, pc);
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001028 if (!event) {
1029 size = 0;
1030 goto out;
1031 }
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001032
1033 entry = ring_buffer_event_data(event);
1034 entry->ip = ip;
1035
1036 memcpy(&entry->buf, str, size);
1037
1038 /* Add a newline if necessary */
1039 if (entry->buf[size - 1] != '\n') {
1040 entry->buf[size] = '\n';
1041 entry->buf[size + 1] = '\0';
1042 } else
1043 entry->buf[size] = '\0';
1044
1045 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001046 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001047 out:
1048 ring_buffer_nest_end(buffer);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001049 return size;
1050}
1051EXPORT_SYMBOL_GPL(__trace_puts);
1052
1053/**
1054 * __trace_bputs - write the pointer to a constant string into trace buffer
1055 * @ip: The address of the caller
1056 * @str: The constant string to write to the buffer to
1057 */
1058int __trace_bputs(unsigned long ip, const char *str)
1059{
1060 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05001061 struct trace_buffer *buffer;
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001062 struct bputs_entry *entry;
1063 unsigned long irq_flags;
1064 int size = sizeof(struct bputs_entry);
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001065 int ret = 0;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +08001066 int pc;
1067
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04001068 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +08001069 return 0;
1070
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +08001071 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001072
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -05001073 if (unlikely(tracing_selftest_running || tracing_disabled))
1074 return 0;
1075
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001076 local_save_flags(irq_flags);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001077 buffer = global_trace.array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001078
1079 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05001080 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1081 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001082 if (!event)
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001083 goto out;
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001084
1085 entry = ring_buffer_event_data(event);
1086 entry->ip = ip;
1087 entry->str = str;
1088
1089 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001090 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001091
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05001092 ret = 1;
1093 out:
1094 ring_buffer_nest_end(buffer);
1095 return ret;
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05001096}
1097EXPORT_SYMBOL_GPL(__trace_bputs);
1098
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001099#ifdef CONFIG_TRACER_SNAPSHOT
Zou Wei192b7992020-04-23 12:08:25 +08001100static void tracing_snapshot_instance_cond(struct trace_array *tr,
1101 void *cond_data)
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001102{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001103 struct tracer *tracer = tr->current_trace;
1104 unsigned long flags;
1105
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001106 if (in_nmi()) {
1107 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1108 internal_trace_puts("*** snapshot is being ignored ***\n");
1109 return;
1110 }
1111
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001112 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -05001113 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1114 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001115 tracing_off();
1116 return;
1117 }
1118
1119 /* Note, snapshot can not be used when the tracer uses it */
1120 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -05001121 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1122 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001123 return;
1124 }
1125
1126 local_irq_save(flags);
Tom Zanussia35873a2019-02-13 17:42:45 -06001127 update_max_tr(tr, current, smp_processor_id(), cond_data);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001128 local_irq_restore(flags);
1129}
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04001130
Tom Zanussia35873a2019-02-13 17:42:45 -06001131void tracing_snapshot_instance(struct trace_array *tr)
1132{
1133 tracing_snapshot_instance_cond(tr, NULL);
1134}
1135
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04001136/**
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001137 * tracing_snapshot - take a snapshot of the current buffer.
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04001138 *
1139 * This causes a swap between the snapshot buffer and the current live
1140 * tracing buffer. You can use this to take snapshots of the live
1141 * trace when some condition is triggered, but continue to trace.
1142 *
1143 * Note, make sure to allocate the snapshot with either
1144 * a tracing_snapshot_alloc(), or by doing it manually
1145 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1146 *
1147 * If the snapshot buffer is not allocated, it will stop tracing.
1148 * Basically making a permanent snapshot.
1149 */
1150void tracing_snapshot(void)
1151{
1152 struct trace_array *tr = &global_trace;
1153
1154 tracing_snapshot_instance(tr);
1155}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001156EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001157
Tom Zanussia35873a2019-02-13 17:42:45 -06001158/**
1159 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1160 * @tr: The tracing instance to snapshot
1161 * @cond_data: The data to be tested conditionally, and possibly saved
1162 *
1163 * This is the same as tracing_snapshot() except that the snapshot is
1164 * conditional - the snapshot will only happen if the
1165 * cond_snapshot.update() implementation receiving the cond_data
1166 * returns true, which means that the trace array's cond_snapshot
1167 * update() operation used the cond_data to determine whether the
1168 * snapshot should be taken, and if it was, presumably saved it along
1169 * with the snapshot.
1170 */
1171void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1172{
1173 tracing_snapshot_instance_cond(tr, cond_data);
1174}
1175EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1176
1177/**
1178 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1179 * @tr: The tracing instance
1180 *
1181 * When the user enables a conditional snapshot using
1182 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1183 * with the snapshot. This accessor is used to retrieve it.
1184 *
1185 * Should not be called from cond_snapshot.update(), since it takes
1186 * the tr->max_lock lock, which the code calling
1187 * cond_snapshot.update() has already done.
1188 *
1189 * Returns the cond_data associated with the trace array's snapshot.
1190 */
1191void *tracing_cond_snapshot_data(struct trace_array *tr)
1192{
1193 void *cond_data = NULL;
1194
1195 arch_spin_lock(&tr->max_lock);
1196
1197 if (tr->cond_snapshot)
1198 cond_data = tr->cond_snapshot->cond_data;
1199
1200 arch_spin_unlock(&tr->max_lock);
1201
1202 return cond_data;
1203}
1204EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1205
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001206static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1207 struct array_buffer *size_buf, int cpu_id);
1208static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001209
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04001210int tracing_alloc_snapshot_instance(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001211{
1212 int ret;
1213
1214 if (!tr->allocated_snapshot) {
1215
1216 /* allocate spare buffer */
1217 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001218 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001219 if (ret < 0)
1220 return ret;
1221
1222 tr->allocated_snapshot = true;
1223 }
1224
1225 return 0;
1226}
1227
Fabian Frederickad1438a2014-04-17 21:44:42 +02001228static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001229{
1230 /*
1231 * We don't free the ring buffer. instead, resize it because
1232 * The max_tr ring buffer has some state (e.g. ring->clock) and
1233 * we want preserve it.
1234 */
1235 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1236 set_buffer_entries(&tr->max_buffer, 1);
1237 tracing_reset_online_cpus(&tr->max_buffer);
1238 tr->allocated_snapshot = false;
1239}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001240
1241/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001242 * tracing_alloc_snapshot - allocate snapshot buffer.
1243 *
1244 * This only allocates the snapshot buffer if it isn't already
1245 * allocated - it doesn't also take a snapshot.
1246 *
1247 * This is meant to be used in cases where the snapshot buffer needs
1248 * to be set up for events that can't sleep but need to be able to
1249 * trigger a snapshot.
1250 */
1251int tracing_alloc_snapshot(void)
1252{
1253 struct trace_array *tr = &global_trace;
1254 int ret;
1255
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04001256 ret = tracing_alloc_snapshot_instance(tr);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001257 WARN_ON(ret < 0);
1258
1259 return ret;
1260}
1261EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1262
1263/**
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001264 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001265 *
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001266 * This is similar to tracing_snapshot(), but it will allocate the
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001267 * snapshot buffer if it isn't already allocated. Use this only
1268 * where it is safe to sleep, as the allocation may sleep.
1269 *
1270 * This causes a swap between the snapshot buffer and the current live
1271 * tracing buffer. You can use this to take snapshots of the live
1272 * trace when some condition is triggered, but continue to trace.
1273 */
1274void tracing_snapshot_alloc(void)
1275{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001276 int ret;
1277
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001278 ret = tracing_alloc_snapshot();
1279 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001280 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001281
1282 tracing_snapshot();
1283}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001284EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001285
1286/**
1287 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1288 * @tr: The tracing instance
1289 * @cond_data: User data to associate with the snapshot
1290 * @update: Implementation of the cond_snapshot update function
1291 *
1292 * Check whether the conditional snapshot for the given instance has
1293 * already been enabled, or if the current tracer is already using a
1294 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1295 * save the cond_data and update function inside.
1296 *
1297 * Returns 0 if successful, error otherwise.
1298 */
1299int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1300 cond_update_fn_t update)
1301{
1302 struct cond_snapshot *cond_snapshot;
1303 int ret = 0;
1304
1305 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1306 if (!cond_snapshot)
1307 return -ENOMEM;
1308
1309 cond_snapshot->cond_data = cond_data;
1310 cond_snapshot->update = update;
1311
1312 mutex_lock(&trace_types_lock);
1313
1314 ret = tracing_alloc_snapshot_instance(tr);
1315 if (ret)
1316 goto fail_unlock;
1317
1318 if (tr->current_trace->use_max_tr) {
1319 ret = -EBUSY;
1320 goto fail_unlock;
1321 }
1322
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001323 /*
1324 * The cond_snapshot can only change to NULL without the
1325 * trace_types_lock. We don't care if we race with it going
1326 * to NULL, but we want to make sure that it's not set to
1327 * something other than NULL when we get here, which we can
1328 * do safely with only holding the trace_types_lock and not
1329 * having to take the max_lock.
1330 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001331 if (tr->cond_snapshot) {
1332 ret = -EBUSY;
1333 goto fail_unlock;
1334 }
1335
1336 arch_spin_lock(&tr->max_lock);
1337 tr->cond_snapshot = cond_snapshot;
1338 arch_spin_unlock(&tr->max_lock);
1339
1340 mutex_unlock(&trace_types_lock);
1341
1342 return ret;
1343
1344 fail_unlock:
1345 mutex_unlock(&trace_types_lock);
1346 kfree(cond_snapshot);
1347 return ret;
1348}
1349EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1350
1351/**
1352 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1353 * @tr: The tracing instance
1354 *
1355 * Check whether the conditional snapshot for the given instance is
1356 * enabled; if so, free the cond_snapshot associated with it,
1357 * otherwise return -EINVAL.
1358 *
1359 * Returns 0 if successful, error otherwise.
1360 */
1361int tracing_snapshot_cond_disable(struct trace_array *tr)
1362{
1363 int ret = 0;
1364
1365 arch_spin_lock(&tr->max_lock);
1366
1367 if (!tr->cond_snapshot)
1368 ret = -EINVAL;
1369 else {
1370 kfree(tr->cond_snapshot);
1371 tr->cond_snapshot = NULL;
1372 }
1373
1374 arch_spin_unlock(&tr->max_lock);
1375
1376 return ret;
1377}
1378EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001379#else
1380void tracing_snapshot(void)
1381{
1382 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1383}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001384EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001385void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1386{
1387 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1388}
1389EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001390int tracing_alloc_snapshot(void)
1391{
1392 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1393 return -ENODEV;
1394}
1395EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001396void tracing_snapshot_alloc(void)
1397{
1398 /* Give warning */
1399 tracing_snapshot();
1400}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001401EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001402void *tracing_cond_snapshot_data(struct trace_array *tr)
1403{
1404 return NULL;
1405}
1406EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1407int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1408{
1409 return -ENODEV;
1410}
1411EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1412int tracing_snapshot_cond_disable(struct trace_array *tr)
1413{
1414 return false;
1415}
1416EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001417#endif /* CONFIG_TRACER_SNAPSHOT */
1418
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001419void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001420{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001421 if (tr->array_buffer.buffer)
1422 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001423 /*
1424 * This flag is looked at when buffers haven't been allocated
1425 * yet, or by some tracers (like irqsoff), that just want to
1426 * know if the ring buffer has been disabled, but it can handle
1427 * races of where it gets disabled but we still do a record.
1428 * As the check is in the fast path of the tracers, it is more
1429 * important to be fast than accurate.
1430 */
1431 tr->buffer_disabled = 1;
1432 /* Make the flag seen by readers */
1433 smp_wmb();
1434}
1435
Steven Rostedt499e5472012-02-22 15:50:28 -05001436/**
1437 * tracing_off - turn off tracing buffers
1438 *
1439 * This function stops the tracing buffers from recording data.
1440 * It does not disable any overhead the tracers themselves may
1441 * be causing. This function simply causes all recording to
1442 * the ring buffers to fail.
1443 */
1444void tracing_off(void)
1445{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001446 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001447}
1448EXPORT_SYMBOL_GPL(tracing_off);
1449
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001450void disable_trace_on_warning(void)
1451{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001452 if (__disable_trace_on_warning) {
1453 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1454 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001455 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001456 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001457}
1458
Steven Rostedt499e5472012-02-22 15:50:28 -05001459/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001460 * tracer_tracing_is_on - show real state of ring buffer enabled
1461 * @tr : the trace array to know if ring buffer is enabled
1462 *
1463 * Shows real state of the ring buffer if it is enabled or not.
1464 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001465bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001466{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001467 if (tr->array_buffer.buffer)
1468 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001469 return !tr->buffer_disabled;
1470}
1471
Steven Rostedt499e5472012-02-22 15:50:28 -05001472/**
1473 * tracing_is_on - show state of ring buffers enabled
1474 */
1475int tracing_is_on(void)
1476{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001477 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001478}
1479EXPORT_SYMBOL_GPL(tracing_is_on);
1480
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001481static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001483 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001484
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001485 if (!str)
1486 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001487 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001488 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001489 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001490 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001491 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001492 return 1;
1493}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001494__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
Tim Bird0e950172010-02-25 15:36:43 -08001496static int __init set_tracing_thresh(char *str)
1497{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001498 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001499 int ret;
1500
1501 if (!str)
1502 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001503 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001504 if (ret < 0)
1505 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001506 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001507 return 1;
1508}
1509__setup("tracing_thresh=", set_tracing_thresh);
1510
Steven Rostedt57f50be2008-05-12 21:20:44 +02001511unsigned long nsecs_to_usecs(unsigned long nsecs)
1512{
1513 return nsecs / 1000;
1514}
1515
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001516/*
1517 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001518 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001519 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001520 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001521 */
1522#undef C
1523#define C(a, b) b
1524
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001525/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001527 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 NULL
1529};
1530
Zhaolei5079f322009-08-25 16:12:56 +08001531static struct {
1532 u64 (*func)(void);
1533 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001534 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001535} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001536 { trace_clock_local, "local", 1 },
1537 { trace_clock_global, "global", 1 },
1538 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001539 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001540 { trace_clock, "perf", 1 },
1541 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001542 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001543 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001544 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001545};
1546
Tom Zanussi860f9f62018-01-15 20:51:48 -06001547bool trace_clock_in_ns(struct trace_array *tr)
1548{
1549 if (trace_clocks[tr->clock_id].in_ns)
1550 return true;
1551
1552 return false;
1553}
1554
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001555/*
1556 * trace_parser_get_init - gets the buffer for trace parser
1557 */
1558int trace_parser_get_init(struct trace_parser *parser, int size)
1559{
1560 memset(parser, 0, sizeof(*parser));
1561
1562 parser->buffer = kmalloc(size, GFP_KERNEL);
1563 if (!parser->buffer)
1564 return 1;
1565
1566 parser->size = size;
1567 return 0;
1568}
1569
1570/*
1571 * trace_parser_put - frees the buffer for trace parser
1572 */
1573void trace_parser_put(struct trace_parser *parser)
1574{
1575 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001576 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001577}
1578
1579/*
1580 * trace_get_user - reads the user input string separated by space
1581 * (matched by isspace(ch))
1582 *
1583 * For each string found the 'struct trace_parser' is updated,
1584 * and the function returns.
1585 *
1586 * Returns number of bytes read.
1587 *
1588 * See kernel/trace/trace.h for 'struct trace_parser' details.
1589 */
1590int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1591 size_t cnt, loff_t *ppos)
1592{
1593 char ch;
1594 size_t read = 0;
1595 ssize_t ret;
1596
1597 if (!*ppos)
1598 trace_parser_clear(parser);
1599
1600 ret = get_user(ch, ubuf++);
1601 if (ret)
1602 goto out;
1603
1604 read++;
1605 cnt--;
1606
1607 /*
1608 * The parser is not finished with the last write,
1609 * continue reading the user input without skipping spaces.
1610 */
1611 if (!parser->cont) {
1612 /* skip white space */
1613 while (cnt && isspace(ch)) {
1614 ret = get_user(ch, ubuf++);
1615 if (ret)
1616 goto out;
1617 read++;
1618 cnt--;
1619 }
1620
Changbin Du76638d92018-01-16 17:02:29 +08001621 parser->idx = 0;
1622
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001624 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001625 *ppos += read;
1626 ret = read;
1627 goto out;
1628 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001629 }
1630
1631 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001632 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001633 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001634 parser->buffer[parser->idx++] = ch;
1635 else {
1636 ret = -EINVAL;
1637 goto out;
1638 }
1639 ret = get_user(ch, ubuf++);
1640 if (ret)
1641 goto out;
1642 read++;
1643 cnt--;
1644 }
1645
1646 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001647 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001648 parser->buffer[parser->idx] = 0;
1649 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001650 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001651 parser->cont = true;
1652 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001653 /* Make sure the parsed string always terminates with '\0'. */
1654 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001655 } else {
1656 ret = -EINVAL;
1657 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001658 }
1659
1660 *ppos += read;
1661 ret = read;
1662
1663out:
1664 return ret;
1665}
1666
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001667/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001668static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001669{
1670 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001671
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001672 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001673 return -EBUSY;
1674
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001675 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001676 if (cnt > len)
1677 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001678 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001679
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001680 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001681 return cnt;
1682}
1683
Tim Bird0e950172010-02-25 15:36:43 -08001684unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001685static const struct file_operations tracing_max_lat_fops;
1686
1687#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1688 defined(CONFIG_FSNOTIFY)
1689
1690static struct workqueue_struct *fsnotify_wq;
1691
1692static void latency_fsnotify_workfn(struct work_struct *work)
1693{
1694 struct trace_array *tr = container_of(work, struct trace_array,
1695 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001696 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001697}
1698
1699static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1700{
1701 struct trace_array *tr = container_of(iwork, struct trace_array,
1702 fsnotify_irqwork);
1703 queue_work(fsnotify_wq, &tr->fsnotify_work);
1704}
1705
1706static void trace_create_maxlat_file(struct trace_array *tr,
1707 struct dentry *d_tracer)
1708{
1709 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1710 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1711 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1712 d_tracer, &tr->max_latency,
1713 &tracing_max_lat_fops);
1714}
1715
1716__init static int latency_fsnotify_init(void)
1717{
1718 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1719 WQ_UNBOUND | WQ_HIGHPRI, 0);
1720 if (!fsnotify_wq) {
1721 pr_err("Unable to allocate tr_max_lat_wq\n");
1722 return -ENOMEM;
1723 }
1724 return 0;
1725}
1726
1727late_initcall_sync(latency_fsnotify_init);
1728
1729void latency_fsnotify(struct trace_array *tr)
1730{
1731 if (!fsnotify_wq)
1732 return;
1733 /*
1734 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1735 * possible that we are called from __schedule() or do_idle(), which
1736 * could cause a deadlock.
1737 */
1738 irq_work_queue(&tr->fsnotify_irqwork);
1739}
1740
1741/*
1742 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1743 * defined(CONFIG_FSNOTIFY)
1744 */
1745#else
1746
1747#define trace_create_maxlat_file(tr, d_tracer) \
1748 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1749 &tr->max_latency, &tracing_max_lat_fops)
1750
1751#endif
Tim Bird0e950172010-02-25 15:36:43 -08001752
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001753#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001754/*
1755 * Copy the new maximum trace into the separate maximum-trace
1756 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001757 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001758 */
1759static void
1760__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1761{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001762 struct array_buffer *trace_buf = &tr->array_buffer;
1763 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001764 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1765 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001766
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001767 max_buf->cpu = cpu;
1768 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001769
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001770 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001771 max_data->critical_start = data->critical_start;
1772 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001773
Tom Zanussi85f726a2019-03-05 10:12:00 -06001774 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001775 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001776 /*
1777 * If tsk == current, then use current_uid(), as that does not use
1778 * RCU. The irq tracer can be called out of RCU scope.
1779 */
1780 if (tsk == current)
1781 max_data->uid = current_uid();
1782 else
1783 max_data->uid = task_uid(tsk);
1784
Steven Rostedt8248ac02009-09-02 12:27:41 -04001785 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1786 max_data->policy = tsk->policy;
1787 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001788
1789 /* record this tasks comm */
1790 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001791 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001792}
1793
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001794/**
1795 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1796 * @tr: tracer
1797 * @tsk: the task with the latency
1798 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001799 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001800 *
1801 * Flip the buffers between the @tr and the max_tr and record information
1802 * about which task was the cause of this latency.
1803 */
Ingo Molnare309b412008-05-12 21:20:51 +02001804void
Tom Zanussia35873a2019-02-13 17:42:45 -06001805update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1806 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001807{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001808 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001809 return;
1810
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001811 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001812
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001813 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001814 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001815 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001816 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001817 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001818
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001819 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001820
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001821 /* Inherit the recordable setting from array_buffer */
1822 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001823 ring_buffer_record_on(tr->max_buffer.buffer);
1824 else
1825 ring_buffer_record_off(tr->max_buffer.buffer);
1826
Tom Zanussia35873a2019-02-13 17:42:45 -06001827#ifdef CONFIG_TRACER_SNAPSHOT
1828 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1829 goto out_unlock;
1830#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001831 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001832
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001833 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001834
1835 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001836 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001837}
1838
1839/**
1840 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001841 * @tr: tracer
1842 * @tsk: task with the latency
1843 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001844 *
1845 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001846 */
Ingo Molnare309b412008-05-12 21:20:51 +02001847void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001848update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1849{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001850 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001851
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001852 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001853 return;
1854
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001855 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001856 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001857 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001858 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001859 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001860 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001861
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001862 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001863
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001864 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001865
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001866 if (ret == -EBUSY) {
1867 /*
1868 * We failed to swap the buffer due to a commit taking
1869 * place on this CPU. We fail to record, but we reset
1870 * the max trace buffer (no one writes directly to it)
1871 * and flag that it failed.
1872 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001873 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001874 "Failed to swap buffers due to commit in progress\n");
1875 }
1876
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001877 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001878
1879 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001880 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001881}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001882#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001883
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001884static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001885{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001886 /* Iterators are static, they should be filled or empty */
1887 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001888 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001889
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001890 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001891 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001892}
1893
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001894#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001895static bool selftests_can_run;
1896
1897struct trace_selftests {
1898 struct list_head list;
1899 struct tracer *type;
1900};
1901
1902static LIST_HEAD(postponed_selftests);
1903
1904static int save_selftest(struct tracer *type)
1905{
1906 struct trace_selftests *selftest;
1907
1908 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1909 if (!selftest)
1910 return -ENOMEM;
1911
1912 selftest->type = type;
1913 list_add(&selftest->list, &postponed_selftests);
1914 return 0;
1915}
1916
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001917static int run_tracer_selftest(struct tracer *type)
1918{
1919 struct trace_array *tr = &global_trace;
1920 struct tracer *saved_tracer = tr->current_trace;
1921 int ret;
1922
1923 if (!type->selftest || tracing_selftest_disabled)
1924 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001925
1926 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001927 * If a tracer registers early in boot up (before scheduling is
1928 * initialized and such), then do not run its selftests yet.
1929 * Instead, run it a little later in the boot process.
1930 */
1931 if (!selftests_can_run)
1932 return save_selftest(type);
1933
1934 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001935 * Run a selftest on this tracer.
1936 * Here we reset the trace buffer, and set the current
1937 * tracer to be this tracer. The tracer can then run some
1938 * internal tracing to verify that everything is in order.
1939 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001940 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001941 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001942
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001943 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001944
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001945#ifdef CONFIG_TRACER_MAX_TRACE
1946 if (type->use_max_tr) {
1947 /* If we expanded the buffers, make sure the max is expanded too */
1948 if (ring_buffer_expanded)
1949 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1950 RING_BUFFER_ALL_CPUS);
1951 tr->allocated_snapshot = true;
1952 }
1953#endif
1954
1955 /* the test is responsible for initializing and enabling */
1956 pr_info("Testing tracer %s: ", type->name);
1957 ret = type->selftest(type, tr);
1958 /* the test is responsible for resetting too */
1959 tr->current_trace = saved_tracer;
1960 if (ret) {
1961 printk(KERN_CONT "FAILED!\n");
1962 /* Add the warning after printing 'FAILED' */
1963 WARN_ON(1);
1964 return -1;
1965 }
1966 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001967 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001968
1969#ifdef CONFIG_TRACER_MAX_TRACE
1970 if (type->use_max_tr) {
1971 tr->allocated_snapshot = false;
1972
1973 /* Shrink the max buffer again */
1974 if (ring_buffer_expanded)
1975 ring_buffer_resize(tr->max_buffer.buffer, 1,
1976 RING_BUFFER_ALL_CPUS);
1977 }
1978#endif
1979
1980 printk(KERN_CONT "PASSED\n");
1981 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001982}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001983
1984static __init int init_trace_selftests(void)
1985{
1986 struct trace_selftests *p, *n;
1987 struct tracer *t, **last;
1988 int ret;
1989
1990 selftests_can_run = true;
1991
1992 mutex_lock(&trace_types_lock);
1993
1994 if (list_empty(&postponed_selftests))
1995 goto out;
1996
1997 pr_info("Running postponed tracer tests:\n");
1998
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001999 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002000 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01002001 /* This loop can take minutes when sanitizers are enabled, so
2002 * lets make sure we allow RCU processing.
2003 */
2004 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002005 ret = run_tracer_selftest(p->type);
2006 /* If the test fails, then warn and remove from available_tracers */
2007 if (ret < 0) {
2008 WARN(1, "tracer: %s failed selftest, disabling\n",
2009 p->type->name);
2010 last = &trace_types;
2011 for (t = trace_types; t; t = t->next) {
2012 if (t == p->type) {
2013 *last = t->next;
2014 break;
2015 }
2016 last = &t->next;
2017 }
2018 }
2019 list_del(&p->list);
2020 kfree(p);
2021 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002022 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002023
2024 out:
2025 mutex_unlock(&trace_types_lock);
2026
2027 return 0;
2028}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002029core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002030#else
2031static inline int run_tracer_selftest(struct tracer *type)
2032{
2033 return 0;
2034}
2035#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002036
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002037static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2038
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002039static void __init apply_trace_boot_options(void);
2040
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002041/**
2042 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002043 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002044 *
2045 * Register a new plugin tracer.
2046 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002047int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002048{
2049 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002050 int ret = 0;
2051
2052 if (!type->name) {
2053 pr_info("Tracer must have a name\n");
2054 return -1;
2055 }
2056
Dan Carpenter24a461d2010-07-10 12:06:44 +02002057 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002058 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2059 return -1;
2060 }
2061
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002062 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002063 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002064 type->name);
2065 return -EPERM;
2066 }
2067
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002068 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002069
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002070 tracing_selftest_running = true;
2071
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002072 for (t = trace_types; t; t = t->next) {
2073 if (strcmp(type->name, t->name) == 0) {
2074 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002075 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002076 type->name);
2077 ret = -1;
2078 goto out;
2079 }
2080 }
2081
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002082 if (!type->set_flag)
2083 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002084 if (!type->flags) {
2085 /*allocate a dummy tracer_flags*/
2086 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002087 if (!type->flags) {
2088 ret = -ENOMEM;
2089 goto out;
2090 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002091 type->flags->val = 0;
2092 type->flags->opts = dummy_tracer_opt;
2093 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002094 if (!type->flags->opts)
2095 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002096
Chunyu Hud39cdd22016-03-08 21:37:01 +08002097 /* store the tracer for __set_tracer_option */
2098 type->flags->trace = type;
2099
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002100 ret = run_tracer_selftest(type);
2101 if (ret < 0)
2102 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002103
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002104 type->next = trace_types;
2105 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002106 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002107
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002108 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002109 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002110 mutex_unlock(&trace_types_lock);
2111
Steven Rostedtdac74942009-02-05 01:13:38 -05002112 if (ret || !default_bootup_tracer)
2113 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002114
Li Zefanee6c2c12009-09-18 14:06:47 +08002115 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002116 goto out_unlock;
2117
2118 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2119 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002120 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002121 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002122
2123 apply_trace_boot_options();
2124
Steven Rostedtdac74942009-02-05 01:13:38 -05002125 /* disable other selftests, since this will break it. */
Masami Hiramatsu60efe212020-12-08 17:54:09 +09002126 disable_tracing_selftest("running a tracer");
Steven Rostedtdac74942009-02-05 01:13:38 -05002127
2128 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002129 return ret;
2130}
2131
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002132static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002133{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002134 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002135
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002136 if (!buffer)
2137 return;
2138
Steven Rostedtf6339032009-09-04 12:35:16 -04002139 ring_buffer_record_disable(buffer);
2140
2141 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002142 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002143 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002144
2145 ring_buffer_record_enable(buffer);
2146}
2147
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002148void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002149{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002150 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002151
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002152 if (!buffer)
2153 return;
2154
Steven Rostedt621968c2009-09-04 12:02:35 -04002155 ring_buffer_record_disable(buffer);
2156
2157 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002158 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002159
Alexander Z Lam94571582013-08-02 18:36:16 -07002160 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002161
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002162 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002163
2164 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002165}
2166
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002167/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002168void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002169{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002170 struct trace_array *tr;
2171
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002172 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002173 if (!tr->clear_trace)
2174 continue;
2175 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002176 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002177#ifdef CONFIG_TRACER_MAX_TRACE
2178 tracing_reset_online_cpus(&tr->max_buffer);
2179#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002180 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002181}
2182
Joel Fernandesd914ba32017-06-26 19:01:55 -07002183static int *tgid_map;
2184
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002185#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002186#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002187static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002188struct saved_cmdlines_buffer {
2189 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2190 unsigned *map_cmdline_to_pid;
2191 unsigned cmdline_num;
2192 int cmdline_idx;
2193 char *saved_cmdlines;
2194};
2195static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002196
Steven Rostedt25b0b442008-05-12 21:21:00 +02002197/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002198static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002199
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002200static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002201{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002202 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2203}
2204
2205static inline void set_cmdline(int idx, const char *cmdline)
2206{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002207 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002208}
2209
2210static int allocate_cmdlines_buffer(unsigned int val,
2211 struct saved_cmdlines_buffer *s)
2212{
Kees Cook6da2ec52018-06-12 13:55:00 -07002213 s->map_cmdline_to_pid = kmalloc_array(val,
2214 sizeof(*s->map_cmdline_to_pid),
2215 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002216 if (!s->map_cmdline_to_pid)
2217 return -ENOMEM;
2218
Kees Cook6da2ec52018-06-12 13:55:00 -07002219 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002220 if (!s->saved_cmdlines) {
2221 kfree(s->map_cmdline_to_pid);
2222 return -ENOMEM;
2223 }
2224
2225 s->cmdline_idx = 0;
2226 s->cmdline_num = val;
2227 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2228 sizeof(s->map_pid_to_cmdline));
2229 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2230 val * sizeof(*s->map_cmdline_to_pid));
2231
2232 return 0;
2233}
2234
2235static int trace_create_savedcmd(void)
2236{
2237 int ret;
2238
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002239 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002240 if (!savedcmd)
2241 return -ENOMEM;
2242
2243 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2244 if (ret < 0) {
2245 kfree(savedcmd);
2246 savedcmd = NULL;
2247 return -ENOMEM;
2248 }
2249
2250 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002251}
2252
Carsten Emdeb5130b12009-09-13 01:43:07 +02002253int is_tracing_stopped(void)
2254{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002255 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002256}
2257
Steven Rostedt0f048702008-11-05 16:05:44 -05002258/**
2259 * tracing_start - quick start of the tracer
2260 *
2261 * If tracing is enabled but was stopped by tracing_stop,
2262 * this will start the tracer back up.
2263 */
2264void tracing_start(void)
2265{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002266 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002267 unsigned long flags;
2268
2269 if (tracing_disabled)
2270 return;
2271
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002272 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2273 if (--global_trace.stop_count) {
2274 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002275 /* Someone screwed up their debugging */
2276 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002277 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002278 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002279 goto out;
2280 }
2281
Steven Rostedta2f80712010-03-12 19:56:00 -05002282 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002283 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002284
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002285 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002286 if (buffer)
2287 ring_buffer_record_enable(buffer);
2288
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002289#ifdef CONFIG_TRACER_MAX_TRACE
2290 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002291 if (buffer)
2292 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002293#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002294
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002295 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002296
Steven Rostedt0f048702008-11-05 16:05:44 -05002297 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002298 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2299}
2300
2301static void tracing_start_tr(struct trace_array *tr)
2302{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002303 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002304 unsigned long flags;
2305
2306 if (tracing_disabled)
2307 return;
2308
2309 /* If global, we need to also start the max tracer */
2310 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2311 return tracing_start();
2312
2313 raw_spin_lock_irqsave(&tr->start_lock, flags);
2314
2315 if (--tr->stop_count) {
2316 if (tr->stop_count < 0) {
2317 /* Someone screwed up their debugging */
2318 WARN_ON_ONCE(1);
2319 tr->stop_count = 0;
2320 }
2321 goto out;
2322 }
2323
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002324 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002325 if (buffer)
2326 ring_buffer_record_enable(buffer);
2327
2328 out:
2329 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002330}
2331
2332/**
2333 * tracing_stop - quick stop of the tracer
2334 *
2335 * Light weight way to stop tracing. Use in conjunction with
2336 * tracing_start.
2337 */
2338void tracing_stop(void)
2339{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002340 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002341 unsigned long flags;
2342
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002343 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2344 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002345 goto out;
2346
Steven Rostedta2f80712010-03-12 19:56:00 -05002347 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002348 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002349
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002350 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002351 if (buffer)
2352 ring_buffer_record_disable(buffer);
2353
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002354#ifdef CONFIG_TRACER_MAX_TRACE
2355 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002356 if (buffer)
2357 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002358#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002359
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002360 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002361
Steven Rostedt0f048702008-11-05 16:05:44 -05002362 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002363 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2364}
2365
2366static void tracing_stop_tr(struct trace_array *tr)
2367{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002368 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002369 unsigned long flags;
2370
2371 /* If global, we need to also stop the max tracer */
2372 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2373 return tracing_stop();
2374
2375 raw_spin_lock_irqsave(&tr->start_lock, flags);
2376 if (tr->stop_count++)
2377 goto out;
2378
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002379 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002380 if (buffer)
2381 ring_buffer_record_disable(buffer);
2382
2383 out:
2384 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002385}
2386
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002387static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002388{
Carsten Emdea635cf02009-03-18 09:00:41 +01002389 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002391 /* treat recording of idle task as a success */
2392 if (!tsk->pid)
2393 return 1;
2394
2395 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002396 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002397
2398 /*
2399 * It's not the end of the world if we don't get
2400 * the lock, but we also don't want to spin
2401 * nor do we want to disable interrupts,
2402 * so if we miss here, then better luck next time.
2403 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002404 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002405 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002406
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002407 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002408 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002409 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002410
Carsten Emdea635cf02009-03-18 09:00:41 +01002411 /*
2412 * Check whether the cmdline buffer at idx has a pid
2413 * mapped. We are going to overwrite that entry so we
2414 * need to clear the map_pid_to_cmdline. Otherwise we
2415 * would read the new comm for the old pid.
2416 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002417 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002418 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002419 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002421 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2422 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002424 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425 }
2426
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002427 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002429 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002430
2431 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432}
2433
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002434static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002436 unsigned map;
2437
Steven Rostedt4ca530852009-03-16 19:20:15 -04002438 if (!pid) {
2439 strcpy(comm, "<idle>");
2440 return;
2441 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442
Steven Rostedt74bf4072010-01-25 15:11:53 -05002443 if (WARN_ON_ONCE(pid < 0)) {
2444 strcpy(comm, "<XXX>");
2445 return;
2446 }
2447
Steven Rostedt4ca530852009-03-16 19:20:15 -04002448 if (pid > PID_MAX_DEFAULT) {
2449 strcpy(comm, "<...>");
2450 return;
2451 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002452
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002453 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002454 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302455 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002456 else
2457 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002458}
2459
2460void trace_find_cmdline(int pid, char comm[])
2461{
2462 preempt_disable();
2463 arch_spin_lock(&trace_cmdline_lock);
2464
2465 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002466
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002467 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002468 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002469}
2470
Joel Fernandesd914ba32017-06-26 19:01:55 -07002471int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002472{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002473 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2474 return 0;
2475
2476 return tgid_map[pid];
2477}
2478
2479static int trace_save_tgid(struct task_struct *tsk)
2480{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002481 /* treat recording of idle task as a success */
2482 if (!tsk->pid)
2483 return 1;
2484
2485 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002486 return 0;
2487
2488 tgid_map[tsk->pid] = tsk->tgid;
2489 return 1;
2490}
2491
2492static bool tracing_record_taskinfo_skip(int flags)
2493{
2494 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2495 return true;
2496 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2497 return true;
2498 if (!__this_cpu_read(trace_taskinfo_save))
2499 return true;
2500 return false;
2501}
2502
2503/**
2504 * tracing_record_taskinfo - record the task info of a task
2505 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002506 * @task: task to record
2507 * @flags: TRACE_RECORD_CMDLINE for recording comm
2508 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002509 */
2510void tracing_record_taskinfo(struct task_struct *task, int flags)
2511{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002512 bool done;
2513
Joel Fernandesd914ba32017-06-26 19:01:55 -07002514 if (tracing_record_taskinfo_skip(flags))
2515 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002516
2517 /*
2518 * Record as much task information as possible. If some fail, continue
2519 * to try to record the others.
2520 */
2521 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2522 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2523
2524 /* If recording any information failed, retry again soon. */
2525 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002526 return;
2527
Joel Fernandesd914ba32017-06-26 19:01:55 -07002528 __this_cpu_write(trace_taskinfo_save, false);
2529}
2530
2531/**
2532 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2533 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002534 * @prev: previous task during sched_switch
2535 * @next: next task during sched_switch
2536 * @flags: TRACE_RECORD_CMDLINE for recording comm
2537 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002538 */
2539void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2540 struct task_struct *next, int flags)
2541{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002542 bool done;
2543
Joel Fernandesd914ba32017-06-26 19:01:55 -07002544 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002545 return;
2546
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002547 /*
2548 * Record as much task information as possible. If some fail, continue
2549 * to try to record the others.
2550 */
2551 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2552 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2553 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2554 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002555
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002556 /* If recording any information failed, retry again soon. */
2557 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002558 return;
2559
2560 __this_cpu_write(trace_taskinfo_save, false);
2561}
2562
2563/* Helpers to record a specific task information */
2564void tracing_record_cmdline(struct task_struct *task)
2565{
2566 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2567}
2568
2569void tracing_record_tgid(struct task_struct *task)
2570{
2571 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572}
2573
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002574/*
2575 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2576 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2577 * simplifies those functions and keeps them in sync.
2578 */
2579enum print_line_t trace_handle_return(struct trace_seq *s)
2580{
2581 return trace_seq_has_overflowed(s) ?
2582 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2583}
2584EXPORT_SYMBOL_GPL(trace_handle_return);
2585
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002586void
Cong Wang46710f32019-05-25 09:57:59 -07002587tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2588 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002589{
2590 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002591
Steven Rostedt777e2082008-09-29 23:02:42 -04002592 entry->preempt_count = pc & 0xff;
2593 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002594 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002595 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002596#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002597 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002598#else
2599 TRACE_FLAG_IRQS_NOSUPPORT |
2600#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002601 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002602 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302603 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002604 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2605 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002606}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002607EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608
Steven Rostedte77405a2009-09-02 14:17:06 -04002609struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002610trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002611 int type,
2612 unsigned long len,
2613 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002614{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002615 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002616}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002617
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002618DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2619DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2620static int trace_buffered_event_ref;
2621
2622/**
2623 * trace_buffered_event_enable - enable buffering events
2624 *
2625 * When events are being filtered, it is quicker to use a temporary
2626 * buffer to write the event data into if there's a likely chance
2627 * that it will not be committed. The discard of the ring buffer
2628 * is not as fast as committing, and is much slower than copying
2629 * a commit.
2630 *
2631 * When an event is to be filtered, allocate per cpu buffers to
2632 * write the event data into, and if the event is filtered and discarded
2633 * it is simply dropped, otherwise, the entire data is to be committed
2634 * in one shot.
2635 */
2636void trace_buffered_event_enable(void)
2637{
2638 struct ring_buffer_event *event;
2639 struct page *page;
2640 int cpu;
2641
2642 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2643
2644 if (trace_buffered_event_ref++)
2645 return;
2646
2647 for_each_tracing_cpu(cpu) {
2648 page = alloc_pages_node(cpu_to_node(cpu),
2649 GFP_KERNEL | __GFP_NORETRY, 0);
2650 if (!page)
2651 goto failed;
2652
2653 event = page_address(page);
2654 memset(event, 0, sizeof(*event));
2655
2656 per_cpu(trace_buffered_event, cpu) = event;
2657
2658 preempt_disable();
2659 if (cpu == smp_processor_id() &&
Xianting Tianb427e762020-08-13 19:28:03 +08002660 __this_cpu_read(trace_buffered_event) !=
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002661 per_cpu(trace_buffered_event, cpu))
2662 WARN_ON_ONCE(1);
2663 preempt_enable();
2664 }
2665
2666 return;
2667 failed:
2668 trace_buffered_event_disable();
2669}
2670
2671static void enable_trace_buffered_event(void *data)
2672{
2673 /* Probably not needed, but do it anyway */
2674 smp_rmb();
2675 this_cpu_dec(trace_buffered_event_cnt);
2676}
2677
2678static void disable_trace_buffered_event(void *data)
2679{
2680 this_cpu_inc(trace_buffered_event_cnt);
2681}
2682
2683/**
2684 * trace_buffered_event_disable - disable buffering events
2685 *
2686 * When a filter is removed, it is faster to not use the buffered
2687 * events, and to commit directly into the ring buffer. Free up
2688 * the temp buffers when there are no more users. This requires
2689 * special synchronization with current events.
2690 */
2691void trace_buffered_event_disable(void)
2692{
2693 int cpu;
2694
2695 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2696
2697 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2698 return;
2699
2700 if (--trace_buffered_event_ref)
2701 return;
2702
2703 preempt_disable();
2704 /* For each CPU, set the buffer as used. */
2705 smp_call_function_many(tracing_buffer_mask,
2706 disable_trace_buffered_event, NULL, 1);
2707 preempt_enable();
2708
2709 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002710 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002711
2712 for_each_tracing_cpu(cpu) {
2713 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2714 per_cpu(trace_buffered_event, cpu) = NULL;
2715 }
2716 /*
2717 * Make sure trace_buffered_event is NULL before clearing
2718 * trace_buffered_event_cnt.
2719 */
2720 smp_wmb();
2721
2722 preempt_disable();
2723 /* Do the work on each cpu */
2724 smp_call_function_many(tracing_buffer_mask,
2725 enable_trace_buffered_event, NULL, 1);
2726 preempt_enable();
2727}
2728
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002729static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002730
Steven Rostedtef5580d2009-02-27 19:38:04 -05002731struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002732trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002733 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002734 int type, unsigned long len,
2735 unsigned long flags, int pc)
2736{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002737 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002738 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002739
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002740 *current_rb = trace_file->tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002741
Tom Zanussi00b41452018-01-15 20:51:39 -06002742 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002743 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2744 (entry = this_cpu_read(trace_buffered_event))) {
2745 /* Try to use the per cpu buffer first */
2746 val = this_cpu_inc_return(trace_buffered_event_cnt);
2747 if (val == 1) {
2748 trace_event_setup(entry, type, flags, pc);
2749 entry->array[0] = len;
2750 return entry;
2751 }
2752 this_cpu_dec(trace_buffered_event_cnt);
2753 }
2754
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002755 entry = __trace_buffer_lock_reserve(*current_rb,
2756 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002757 /*
2758 * If tracing is off, but we have triggers enabled
2759 * we still need to look at the event data. Use the temp_buffer
Qiujun Huang906695e2020-10-31 16:57:14 +08002760 * to store the trace event for the trigger to use. It's recursive
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002761 * safe and will not be recorded anywhere.
2762 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002763 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002764 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002765 entry = __trace_buffer_lock_reserve(*current_rb,
2766 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002767 }
2768 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002769}
2770EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2771
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002772static DEFINE_SPINLOCK(tracepoint_iter_lock);
2773static DEFINE_MUTEX(tracepoint_printk_mutex);
2774
2775static void output_printk(struct trace_event_buffer *fbuffer)
2776{
2777 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002778 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002779 struct trace_event *event;
2780 unsigned long flags;
2781 struct trace_iterator *iter = tracepoint_print_iter;
2782
2783 /* We should never get here if iter is NULL */
2784 if (WARN_ON_ONCE(!iter))
2785 return;
2786
2787 event_call = fbuffer->trace_file->event_call;
2788 if (!event_call || !event_call->event.funcs ||
2789 !event_call->event.funcs->trace)
2790 return;
2791
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002792 file = fbuffer->trace_file;
2793 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2794 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2795 !filter_match_preds(file->filter, fbuffer->entry)))
2796 return;
2797
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002798 event = &fbuffer->trace_file->event_call->event;
2799
2800 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2801 trace_seq_init(&iter->seq);
2802 iter->ent = fbuffer->entry;
2803 event_call->event.funcs->trace(iter, 0, event);
2804 trace_seq_putc(&iter->seq, 0);
2805 printk("%s", iter->seq.buffer);
2806
2807 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2808}
2809
2810int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002811 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002812 loff_t *ppos)
2813{
2814 int save_tracepoint_printk;
2815 int ret;
2816
2817 mutex_lock(&tracepoint_printk_mutex);
2818 save_tracepoint_printk = tracepoint_printk;
2819
2820 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2821
2822 /*
2823 * This will force exiting early, as tracepoint_printk
2824 * is always zero when tracepoint_printk_iter is not allocated
2825 */
2826 if (!tracepoint_print_iter)
2827 tracepoint_printk = 0;
2828
2829 if (save_tracepoint_printk == tracepoint_printk)
2830 goto out;
2831
2832 if (tracepoint_printk)
2833 static_key_enable(&tracepoint_printk_key.key);
2834 else
2835 static_key_disable(&tracepoint_printk_key.key);
2836
2837 out:
2838 mutex_unlock(&tracepoint_printk_mutex);
2839
2840 return ret;
2841}
2842
2843void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2844{
2845 if (static_key_false(&tracepoint_printk_key.key))
2846 output_printk(fbuffer);
2847
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002848 if (static_branch_unlikely(&trace_event_exports_enabled))
2849 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002850 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002851 fbuffer->event, fbuffer->entry,
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002852 fbuffer->flags, fbuffer->pc, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002853}
2854EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2855
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002856/*
2857 * Skip 3:
2858 *
2859 * trace_buffer_unlock_commit_regs()
2860 * trace_event_buffer_commit()
2861 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302862 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002863# define STACK_SKIP 3
2864
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002865void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002866 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002867 struct ring_buffer_event *event,
2868 unsigned long flags, int pc,
2869 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002870{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002871 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002872
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002873 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002874 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002875 * Note, we can still get here via blktrace, wakeup tracer
2876 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002877 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002878 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002879 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002880 ftrace_trace_userstack(buffer, flags, pc);
2881}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002882
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002883/*
2884 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2885 */
2886void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002887trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002888 struct ring_buffer_event *event)
2889{
2890 __buffer_unlock_commit(buffer, event);
2891}
2892
Ingo Molnare309b412008-05-12 21:20:51 +02002893void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002894trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002895 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2896 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002897{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002898 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002899 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002900 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002901 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002902
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002903 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2904 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002905 if (!event)
2906 return;
2907 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002908 entry->ip = ip;
2909 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002910
Chunyan Zhang478409d2016-11-21 15:57:18 +08002911 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002912 if (static_branch_unlikely(&trace_function_exports_enabled))
2913 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002914 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002915 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002916}
2917
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002918#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002919
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002920/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2921#define FTRACE_KSTACK_NESTING 4
2922
2923#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2924
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002925struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002926 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002927};
2928
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002929
2930struct ftrace_stacks {
2931 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2932};
2933
2934static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002935static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2936
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002937static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002938 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002939 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002940{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002941 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002942 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002943 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002944 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002945 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002946 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002947
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002948 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002949 * Add one, for this function and the call to save_stack_trace()
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002950 * If regs is set, then these functions will not be in the way.
2951 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002952#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002953 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002954 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002955#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002956
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002957 preempt_disable_notrace();
2958
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002959 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2960
2961 /* This should never happen. If it does, yell once and skip */
Qiujun Huang906695e2020-10-31 16:57:14 +08002962 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002963 goto out;
2964
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002965 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002966 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2967 * interrupt will either see the value pre increment or post
2968 * increment. If the interrupt happens pre increment it will have
2969 * restored the counter when it returns. We just need a barrier to
2970 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002971 */
2972 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002973
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002974 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002975 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002976
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002977 if (regs) {
2978 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2979 size, skip);
2980 } else {
2981 nr_entries = stack_trace_save(fstack->calls, size, skip);
2982 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002983
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002984 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002985 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2986 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002987 if (!event)
2988 goto out;
2989 entry = ring_buffer_event_data(event);
2990
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002991 memcpy(&entry->caller, fstack->calls, size);
2992 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002993
Tom Zanussif306cc82013-10-24 08:34:17 -05002994 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002995 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002996
2997 out:
2998 /* Again, don't let gcc optimize things here */
2999 barrier();
Shan Wei82146522012-11-19 13:21:01 +08003000 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003001 preempt_enable_notrace();
3002
Ingo Molnarf0a920d2008-05-12 21:20:47 +02003003}
3004
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003005static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003006 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003007 unsigned long flags,
3008 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003009{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003010 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003011 return;
3012
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003013 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003014}
3015
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003016void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3017 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04003018{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003019 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003020
3021 if (rcu_is_watching()) {
3022 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3023 return;
3024 }
3025
3026 /*
3027 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3028 * but if the above rcu_is_watching() failed, then the NMI
3029 * triggered someplace critical, and rcu_irq_enter() should
3030 * not be called from NMI.
3031 */
3032 if (unlikely(in_nmi()))
3033 return;
3034
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003035 rcu_irq_enter_irqson();
3036 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3037 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003038}
3039
Steven Rostedt03889382009-12-11 09:48:22 -05003040/**
3041 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003042 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003043 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003044void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003045{
3046 unsigned long flags;
3047
3048 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003049 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003050
3051 local_save_flags(flags);
3052
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003053#ifndef CONFIG_UNWINDER_ORC
3054 /* Skip 1 to skip this function. */
3055 skip++;
3056#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003057 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003058 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003059}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003060EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05003061
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003062#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003063static DEFINE_PER_CPU(int, user_stack_count);
3064
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003065static void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003066ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003067{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003068 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003069 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003070 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003071
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003072 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003073 return;
3074
Steven Rostedtb6345872010-03-12 20:03:30 -05003075 /*
3076 * NMIs can not handle page faults, even with fix ups.
3077 * The save user stack can (and often does) fault.
3078 */
3079 if (unlikely(in_nmi()))
3080 return;
3081
Steven Rostedt91e86e52010-11-10 12:56:12 +01003082 /*
3083 * prevent recursion, since the user stack tracing may
3084 * trigger other kernel events.
3085 */
3086 preempt_disable();
3087 if (__this_cpu_read(user_stack_count))
3088 goto out;
3089
3090 __this_cpu_inc(user_stack_count);
3091
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003092 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3093 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003094 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003095 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003096 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003097
Steven Rostedt48659d32009-09-11 11:36:23 -04003098 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003099 memset(&entry->caller, 0, sizeof(entry->caller));
3100
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003101 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003102 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003103 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003104
Li Zefan1dbd1952010-12-09 15:47:56 +08003105 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003106 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003107 out:
3108 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003109}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003110#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003111static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003112 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003113{
Török Edwin02b67512008-11-22 13:28:47 +02003114}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003115#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003116
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003117#endif /* CONFIG_STACKTRACE */
3118
Steven Rostedt07d777f2011-09-22 14:01:55 -04003119/* created for use with alloc_percpu */
3120struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003121 int nesting;
3122 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003123};
3124
3125static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003126
3127/*
Qiujun Huang2b5894c2020-10-29 23:05:54 +08003128 * This allows for lockless recording. If we're nested too deeply, then
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003129 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003130 */
3131static char *get_trace_buf(void)
3132{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003133 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003134
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003135 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003136 return NULL;
3137
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003138 buffer->nesting++;
3139
3140 /* Interrupts must see nesting incremented before we use the buffer */
3141 barrier();
Qiujun Huangc1acb4a2020-10-30 00:19:05 +08003142 return &buffer->buffer[buffer->nesting - 1][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003143}
3144
3145static void put_trace_buf(void)
3146{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003147 /* Don't let the decrement of nesting leak before this */
3148 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003149 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003150}
3151
3152static int alloc_percpu_trace_buffer(void)
3153{
3154 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003155
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003156 if (trace_percpu_buffer)
3157 return 0;
3158
Steven Rostedt07d777f2011-09-22 14:01:55 -04003159 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003160 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003161 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003162
3163 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003164 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003165}
3166
Steven Rostedt81698832012-10-11 10:15:05 -04003167static int buffers_allocated;
3168
Steven Rostedt07d777f2011-09-22 14:01:55 -04003169void trace_printk_init_buffers(void)
3170{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003171 if (buffers_allocated)
3172 return;
3173
3174 if (alloc_percpu_trace_buffer())
3175 return;
3176
Steven Rostedt2184db42014-05-28 13:14:40 -04003177 /* trace_printk() is for debug use only. Don't use it in production. */
3178
Joe Perchesa395d6a2016-03-22 14:28:09 -07003179 pr_warn("\n");
3180 pr_warn("**********************************************************\n");
3181 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3182 pr_warn("** **\n");
3183 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3184 pr_warn("** **\n");
3185 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3186 pr_warn("** unsafe for production use. **\n");
3187 pr_warn("** **\n");
3188 pr_warn("** If you see this message and you are not debugging **\n");
3189 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3190 pr_warn("** **\n");
3191 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3192 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003193
Steven Rostedtb382ede62012-10-10 21:44:34 -04003194 /* Expand the buffers to set size */
3195 tracing_update_buffers();
3196
Steven Rostedt07d777f2011-09-22 14:01:55 -04003197 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003198
3199 /*
3200 * trace_printk_init_buffers() can be called by modules.
3201 * If that happens, then we need to start cmdline recording
3202 * directly here. If the global_trace.buffer is already
3203 * allocated here, then this was called by module code.
3204 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003205 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003206 tracing_start_cmdline_record();
3207}
Divya Indif45d1222019-03-20 11:28:51 -07003208EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003209
3210void trace_printk_start_comm(void)
3211{
3212 /* Start tracing comms if trace printk is set */
3213 if (!buffers_allocated)
3214 return;
3215 tracing_start_cmdline_record();
3216}
3217
3218static void trace_printk_start_stop_comm(int enabled)
3219{
3220 if (!buffers_allocated)
3221 return;
3222
3223 if (enabled)
3224 tracing_start_cmdline_record();
3225 else
3226 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003227}
3228
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003229/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003230 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003231 * @ip: The address of the caller
3232 * @fmt: The string format to write to the buffer
3233 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003234 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003235int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003236{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003237 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003239 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003240 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003241 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003242 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003243 char *tbuffer;
3244 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003245
3246 if (unlikely(tracing_selftest_running || tracing_disabled))
3247 return 0;
3248
3249 /* Don't pollute graph traces with trace_vprintk internals */
3250 pause_graph_tracing();
3251
3252 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003253 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003254
Steven Rostedt07d777f2011-09-22 14:01:55 -04003255 tbuffer = get_trace_buf();
3256 if (!tbuffer) {
3257 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003258 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003259 }
3260
3261 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3262
3263 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003264 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003265
Steven Rostedt07d777f2011-09-22 14:01:55 -04003266 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003267 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003268 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003269 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003270 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3271 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003272 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003273 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003274 entry = ring_buffer_event_data(event);
3275 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003276 entry->fmt = fmt;
3277
Steven Rostedt07d777f2011-09-22 14:01:55 -04003278 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003279 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003280 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003281 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003282 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003283
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003284out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003285 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003286out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003287 put_trace_buf();
3288
3289out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003290 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003291 unpause_graph_tracing();
3292
3293 return len;
3294}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003295EXPORT_SYMBOL_GPL(trace_vbprintk);
3296
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003297__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003298static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003299__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003300 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003301{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003302 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003303 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003304 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003305 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003306 unsigned long flags;
3307 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003308
3309 if (tracing_disabled || tracing_selftest_running)
3310 return 0;
3311
Steven Rostedt07d777f2011-09-22 14:01:55 -04003312 /* Don't pollute graph traces with trace_vprintk internals */
3313 pause_graph_tracing();
3314
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003315 pc = preempt_count();
3316 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003317
Steven Rostedt07d777f2011-09-22 14:01:55 -04003318
3319 tbuffer = get_trace_buf();
3320 if (!tbuffer) {
3321 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003322 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003323 }
3324
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003325 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003326
Steven Rostedt07d777f2011-09-22 14:01:55 -04003327 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003328 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003329 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003330 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3331 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003332 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003333 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003334 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003335 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003336
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003337 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003338 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003339 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003340 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003341 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003342
3343out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003344 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003345 put_trace_buf();
3346
3347out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003348 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003349 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003350
3351 return len;
3352}
Steven Rostedt659372d2009-09-03 19:11:07 -04003353
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003354__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003355int trace_array_vprintk(struct trace_array *tr,
3356 unsigned long ip, const char *fmt, va_list args)
3357{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003358 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003359}
3360
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003361/**
3362 * trace_array_printk - Print a message to a specific instance
3363 * @tr: The instance trace_array descriptor
3364 * @ip: The instruction pointer that this is called from.
3365 * @fmt: The format to print (printf format)
3366 *
3367 * If a subsystem sets up its own instance, they have the right to
3368 * printk strings into their tracing instance buffer using this
3369 * function. Note, this function will not write into the top level
3370 * buffer (use trace_printk() for that), as writing into the top level
3371 * buffer should only have events that can be individually disabled.
3372 * trace_printk() is only used for debugging a kernel, and should not
3373 * be ever encorporated in normal use.
3374 *
3375 * trace_array_printk() can be used, as it will not add noise to the
3376 * top level tracing buffer.
3377 *
3378 * Note, trace_array_init_printk() must be called on @tr before this
3379 * can be used.
3380 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003381__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003382int trace_array_printk(struct trace_array *tr,
3383 unsigned long ip, const char *fmt, ...)
3384{
3385 int ret;
3386 va_list ap;
3387
Divya Indi953ae452019-08-14 10:55:25 -07003388 if (!tr)
3389 return -ENOENT;
3390
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003391 /* This is only allowed for created instances */
3392 if (tr == &global_trace)
3393 return 0;
3394
3395 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3396 return 0;
3397
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003398 va_start(ap, fmt);
3399 ret = trace_array_vprintk(tr, ip, fmt, ap);
3400 va_end(ap);
3401 return ret;
3402}
Divya Indif45d1222019-03-20 11:28:51 -07003403EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003404
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003405/**
3406 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3407 * @tr: The trace array to initialize the buffers for
3408 *
3409 * As trace_array_printk() only writes into instances, they are OK to
3410 * have in the kernel (unlike trace_printk()). This needs to be called
3411 * before trace_array_printk() can be used on a trace_array.
3412 */
3413int trace_array_init_printk(struct trace_array *tr)
3414{
3415 if (!tr)
3416 return -ENOENT;
3417
3418 /* This is only allowed for created instances */
3419 if (tr == &global_trace)
3420 return -EINVAL;
3421
3422 return alloc_percpu_trace_buffer();
3423}
3424EXPORT_SYMBOL_GPL(trace_array_init_printk);
3425
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003426__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003427int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003428 unsigned long ip, const char *fmt, ...)
3429{
3430 int ret;
3431 va_list ap;
3432
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003433 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003434 return 0;
3435
3436 va_start(ap, fmt);
3437 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3438 va_end(ap);
3439 return ret;
3440}
3441
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003442__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003443int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3444{
Steven Rostedta813a152009-10-09 01:41:35 -04003445 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003446}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003447EXPORT_SYMBOL_GPL(trace_vprintk);
3448
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003449static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003450{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003451 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3452
Steven Rostedt5a90f572008-09-03 17:42:51 -04003453 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003454 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003455 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003456}
3457
Ingo Molnare309b412008-05-12 21:20:51 +02003458static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003459peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3460 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003461{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003462 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003463 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003464
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003465 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003466 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003467 if (lost_events)
3468 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3469 (unsigned long)-1 : 0;
3470 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003471 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003472 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003473 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003474
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003475 if (event) {
3476 iter->ent_size = ring_buffer_event_length(event);
3477 return ring_buffer_event_data(event);
3478 }
3479 iter->ent_size = 0;
3480 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003481}
Steven Rostedtd7690412008-10-01 00:29:53 -04003482
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003483static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003484__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3485 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003486{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003487 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003488 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003489 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003490 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003491 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003492 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003493 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003494 int cpu;
3495
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003496 /*
3497 * If we are in a per_cpu trace file, don't bother by iterating over
3498 * all cpu and peek directly.
3499 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003500 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003501 if (ring_buffer_empty_cpu(buffer, cpu_file))
3502 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003503 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003504 if (ent_cpu)
3505 *ent_cpu = cpu_file;
3506
3507 return ent;
3508 }
3509
Steven Rostedtab464282008-05-12 21:21:00 +02003510 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003511
3512 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003513 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003514
Steven Rostedtbc21b472010-03-31 19:49:26 -04003515 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003516
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003517 /*
3518 * Pick the entry with the smallest timestamp:
3519 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003520 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003521 next = ent;
3522 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003523 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003524 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003525 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003526 }
3527 }
3528
Steven Rostedt12b5da32012-03-27 10:43:28 -04003529 iter->ent_size = next_size;
3530
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003531 if (ent_cpu)
3532 *ent_cpu = next_cpu;
3533
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003534 if (ent_ts)
3535 *ent_ts = next_ts;
3536
Steven Rostedtbc21b472010-03-31 19:49:26 -04003537 if (missing_events)
3538 *missing_events = next_lost;
3539
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540 return next;
3541}
3542
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003543#define STATIC_TEMP_BUF_SIZE 128
3544static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3545
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003546/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003547struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3548 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003549{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003550 /* __find_next_entry will reset ent_size */
3551 int ent_size = iter->ent_size;
3552 struct trace_entry *entry;
3553
3554 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003555 * If called from ftrace_dump(), then the iter->temp buffer
3556 * will be the static_temp_buf and not created from kmalloc.
3557 * If the entry size is greater than the buffer, we can
3558 * not save it. Just return NULL in that case. This is only
3559 * used to add markers when two consecutive events' time
3560 * stamps have a large delta. See trace_print_lat_context()
3561 */
3562 if (iter->temp == static_temp_buf &&
3563 STATIC_TEMP_BUF_SIZE < ent_size)
3564 return NULL;
3565
3566 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003567 * The __find_next_entry() may call peek_next_entry(), which may
3568 * call ring_buffer_peek() that may make the contents of iter->ent
3569 * undefined. Need to copy iter->ent now.
3570 */
3571 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003572 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3573 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003574 void *temp;
3575 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3576 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003577 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003578 kfree(iter->temp);
3579 iter->temp = temp;
3580 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003581 }
3582 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003583 iter->ent = iter->temp;
3584 }
3585 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3586 /* Put back the original ent_size */
3587 iter->ent_size = ent_size;
3588
3589 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003590}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003591
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003592/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003593void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003594{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003595 iter->ent = __find_next_entry(iter, &iter->cpu,
3596 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003597
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003598 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003599 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003600
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003601 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003602}
3603
Ingo Molnare309b412008-05-12 21:20:51 +02003604static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003605{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003606 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003607 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003608}
3609
Ingo Molnare309b412008-05-12 21:20:51 +02003610static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003611{
3612 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003613 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003614 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003615
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003616 WARN_ON_ONCE(iter->leftover);
3617
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003618 (*pos)++;
3619
3620 /* can't go backwards */
3621 if (iter->idx > i)
3622 return NULL;
3623
3624 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003625 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003626 else
3627 ent = iter;
3628
3629 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003630 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003631
3632 iter->pos = *pos;
3633
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003634 return ent;
3635}
3636
Jason Wessel955b61e2010-08-05 09:22:23 -05003637void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003638{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003639 struct ring_buffer_iter *buf_iter;
3640 unsigned long entries = 0;
3641 u64 ts;
3642
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003643 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003644
Steven Rostedt6d158a82012-06-27 20:46:14 -04003645 buf_iter = trace_buffer_iter(iter, cpu);
3646 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003647 return;
3648
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003649 ring_buffer_iter_reset(buf_iter);
3650
3651 /*
3652 * We could have the case with the max latency tracers
3653 * that a reset never took place on a cpu. This is evident
3654 * by the timestamp being before the start of the buffer.
3655 */
YangHui69243722020-06-16 11:36:46 +08003656 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003657 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003658 break;
3659 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003660 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003661 }
3662
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003663 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003664}
3665
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003666/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003667 * The current tracer is copied to avoid a global locking
3668 * all around.
3669 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003670static void *s_start(struct seq_file *m, loff_t *pos)
3671{
3672 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003673 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003674 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003675 void *p = NULL;
3676 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003677 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003678
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003679 /*
3680 * copy the tracer to avoid using a global lock all around.
3681 * iter->trace is a copy of current_trace, the pointer to the
3682 * name may be used instead of a strcmp(), as iter->trace->name
3683 * will point to the same string as current_trace->name.
3684 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003685 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003686 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3687 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003688 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003689
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003690#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003691 if (iter->snapshot && iter->trace->use_max_tr)
3692 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003693#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003694
3695 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003696 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003697
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003698 if (*pos != iter->pos) {
3699 iter->ent = NULL;
3700 iter->cpu = 0;
3701 iter->idx = -1;
3702
Steven Rostedtae3b5092013-01-23 15:22:59 -05003703 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003704 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003705 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003706 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003707 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003708
Lai Jiangshanac91d852010-03-02 17:54:50 +08003709 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003710 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3711 ;
3712
3713 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003714 /*
3715 * If we overflowed the seq_file before, then we want
3716 * to just reuse the trace_seq buffer again.
3717 */
3718 if (iter->leftover)
3719 p = iter;
3720 else {
3721 l = *pos - 1;
3722 p = s_next(m, p, &l);
3723 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003724 }
3725
Lai Jiangshan4f535962009-05-18 19:35:34 +08003726 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003727 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003728 return p;
3729}
3730
3731static void s_stop(struct seq_file *m, void *p)
3732{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003733 struct trace_iterator *iter = m->private;
3734
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003735#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003736 if (iter->snapshot && iter->trace->use_max_tr)
3737 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003738#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003739
3740 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003741 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003742
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003743 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003744 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003745}
3746
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003747static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003748get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003749 unsigned long *entries, int cpu)
3750{
3751 unsigned long count;
3752
3753 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3754 /*
3755 * If this buffer has skipped entries, then we hold all
3756 * entries for the trace and we need to ignore the
3757 * ones before the time stamp.
3758 */
3759 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3760 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3761 /* total is the same as the entries */
3762 *total = count;
3763 } else
3764 *total = count +
3765 ring_buffer_overrun_cpu(buf->buffer, cpu);
3766 *entries = count;
3767}
3768
3769static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003770get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003771 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003772{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003773 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003774 int cpu;
3775
3776 *total = 0;
3777 *entries = 0;
3778
3779 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003780 get_total_entries_cpu(buf, &t, &e, cpu);
3781 *total += t;
3782 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003783 }
3784}
3785
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003786unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3787{
3788 unsigned long total, entries;
3789
3790 if (!tr)
3791 tr = &global_trace;
3792
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003793 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003794
3795 return entries;
3796}
3797
3798unsigned long trace_total_entries(struct trace_array *tr)
3799{
3800 unsigned long total, entries;
3801
3802 if (!tr)
3803 tr = &global_trace;
3804
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003805 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003806
3807 return entries;
3808}
3809
Ingo Molnare309b412008-05-12 21:20:51 +02003810static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003811{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003812 seq_puts(m, "# _------=> CPU# \n"
3813 "# / _-----=> irqs-off \n"
3814 "# | / _----=> need-resched \n"
3815 "# || / _---=> hardirq/softirq \n"
3816 "# ||| / _--=> preempt-depth \n"
3817 "# |||| / delay \n"
3818 "# cmd pid ||||| time | caller \n"
3819 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003820}
3821
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003822static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003823{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003824 unsigned long total;
3825 unsigned long entries;
3826
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003827 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003828 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3829 entries, total, num_online_cpus());
3830 seq_puts(m, "#\n");
3831}
3832
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003833static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003834 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003835{
Joel Fernandes441dae82017-06-25 22:38:43 -07003836 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3837
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003838 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003839
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003840 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3841 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003842}
3843
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003844static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003845 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003846{
Joel Fernandes441dae82017-06-25 22:38:43 -07003847 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003848 const char *space = " ";
3849 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003850
Quentin Perret9e738212019-02-14 15:29:50 +00003851 print_event_info(buf, m);
3852
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003853 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3854 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3855 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3856 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3857 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3858 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3859 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003860}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003861
Jiri Olsa62b915f2010-04-02 19:01:22 +02003862void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003863print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3864{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003865 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003866 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003867 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003868 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003869 unsigned long entries;
3870 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003871 const char *name = "preemption";
3872
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003873 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003874
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003875 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003876
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003877 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003878 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003879 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003880 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003881 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003882 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003883 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003884 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003885 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003886 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003887#if defined(CONFIG_PREEMPT_NONE)
3888 "server",
3889#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3890 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003891#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003892 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02003893#elif defined(CONFIG_PREEMPT_RT)
3894 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003895#else
3896 "unknown",
3897#endif
3898 /* These are reserved for later use */
3899 0, 0, 0, 0);
3900#ifdef CONFIG_SMP
3901 seq_printf(m, " #P:%d)\n", num_online_cpus());
3902#else
3903 seq_puts(m, ")\n");
3904#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003905 seq_puts(m, "# -----------------\n");
3906 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003907 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003908 data->comm, data->pid,
3909 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003910 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003911 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912
3913 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003914 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003915 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3916 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003917 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003918 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3919 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003920 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003921 }
3922
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003923 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003924}
3925
Steven Rostedta3097202008-11-07 22:36:02 -05003926static void test_cpu_buff_start(struct trace_iterator *iter)
3927{
3928 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003929 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003930
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003931 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003932 return;
3933
3934 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3935 return;
3936
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003937 if (cpumask_available(iter->started) &&
3938 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003939 return;
3940
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003941 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003942 return;
3943
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003944 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003945 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003946
3947 /* Don't print started cpu buffer for the first entry of the trace */
3948 if (iter->idx > 1)
3949 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3950 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003951}
3952
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003953static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003954{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003955 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003956 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003957 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003958 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003959 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003960
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003961 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003962
Steven Rostedta3097202008-11-07 22:36:02 -05003963 test_cpu_buff_start(iter);
3964
Steven Rostedtf633cef2008-12-23 23:24:13 -05003965 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003966
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003967 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003968 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3969 trace_print_lat_context(iter);
3970 else
3971 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003972 }
3973
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003974 if (trace_seq_has_overflowed(s))
3975 return TRACE_TYPE_PARTIAL_LINE;
3976
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003977 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003978 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003979
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003980 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003981
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003982 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003983}
3984
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003985static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003986{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003987 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003988 struct trace_seq *s = &iter->seq;
3989 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003990 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003991
3992 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003993
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003994 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003995 trace_seq_printf(s, "%d %d %llu ",
3996 entry->pid, iter->cpu, iter->ts);
3997
3998 if (trace_seq_has_overflowed(s))
3999 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004000
Steven Rostedtf633cef2008-12-23 23:24:13 -05004001 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004002 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004003 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004004
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004005 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004006
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004007 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004008}
4009
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004010static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004011{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004012 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004013 struct trace_seq *s = &iter->seq;
4014 unsigned char newline = '\n';
4015 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004016 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004017
4018 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004019
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004020 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004021 SEQ_PUT_HEX_FIELD(s, entry->pid);
4022 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4023 SEQ_PUT_HEX_FIELD(s, iter->ts);
4024 if (trace_seq_has_overflowed(s))
4025 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004026 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004027
Steven Rostedtf633cef2008-12-23 23:24:13 -05004028 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004029 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004030 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004031 if (ret != TRACE_TYPE_HANDLED)
4032 return ret;
4033 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004034
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004035 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004036
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004037 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004038}
4039
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004040static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004041{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004042 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004043 struct trace_seq *s = &iter->seq;
4044 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004045 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004046
4047 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004048
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004049 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004050 SEQ_PUT_FIELD(s, entry->pid);
4051 SEQ_PUT_FIELD(s, iter->cpu);
4052 SEQ_PUT_FIELD(s, iter->ts);
4053 if (trace_seq_has_overflowed(s))
4054 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004055 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004056
Steven Rostedtf633cef2008-12-23 23:24:13 -05004057 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004058 return event ? event->funcs->binary(iter, 0, event) :
4059 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004060}
4061
Jiri Olsa62b915f2010-04-02 19:01:22 +02004062int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004063{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004064 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004065 int cpu;
4066
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004067 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004068 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004069 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004070 buf_iter = trace_buffer_iter(iter, cpu);
4071 if (buf_iter) {
4072 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004073 return 0;
4074 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004075 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004076 return 0;
4077 }
4078 return 1;
4079 }
4080
Steven Rostedtab464282008-05-12 21:21:00 +02004081 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004082 buf_iter = trace_buffer_iter(iter, cpu);
4083 if (buf_iter) {
4084 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004085 return 0;
4086 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004087 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004088 return 0;
4089 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004090 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004091
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004092 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004093}
4094
Lai Jiangshan4f535962009-05-18 19:35:34 +08004095/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004096enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004097{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004098 struct trace_array *tr = iter->tr;
4099 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004100 enum print_line_t ret;
4101
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004102 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004103 if (iter->lost_events == (unsigned long)-1)
4104 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4105 iter->cpu);
4106 else
4107 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4108 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004109 if (trace_seq_has_overflowed(&iter->seq))
4110 return TRACE_TYPE_PARTIAL_LINE;
4111 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004112
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004113 if (iter->trace && iter->trace->print_line) {
4114 ret = iter->trace->print_line(iter);
4115 if (ret != TRACE_TYPE_UNHANDLED)
4116 return ret;
4117 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004118
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004119 if (iter->ent->type == TRACE_BPUTS &&
4120 trace_flags & TRACE_ITER_PRINTK &&
4121 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4122 return trace_print_bputs_msg_only(iter);
4123
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004124 if (iter->ent->type == TRACE_BPRINT &&
4125 trace_flags & TRACE_ITER_PRINTK &&
4126 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004127 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004128
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004129 if (iter->ent->type == TRACE_PRINT &&
4130 trace_flags & TRACE_ITER_PRINTK &&
4131 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004132 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004133
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004134 if (trace_flags & TRACE_ITER_BIN)
4135 return print_bin_fmt(iter);
4136
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004137 if (trace_flags & TRACE_ITER_HEX)
4138 return print_hex_fmt(iter);
4139
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004140 if (trace_flags & TRACE_ITER_RAW)
4141 return print_raw_fmt(iter);
4142
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004143 return print_trace_fmt(iter);
4144}
4145
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004146void trace_latency_header(struct seq_file *m)
4147{
4148 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004149 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004150
4151 /* print nothing if the buffers are empty */
4152 if (trace_empty(iter))
4153 return;
4154
4155 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4156 print_trace_header(m, iter);
4157
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004158 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004159 print_lat_help_header(m);
4160}
4161
Jiri Olsa62b915f2010-04-02 19:01:22 +02004162void trace_default_header(struct seq_file *m)
4163{
4164 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004165 struct trace_array *tr = iter->tr;
4166 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004167
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004168 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4169 return;
4170
Jiri Olsa62b915f2010-04-02 19:01:22 +02004171 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4172 /* print nothing if the buffers are empty */
4173 if (trace_empty(iter))
4174 return;
4175 print_trace_header(m, iter);
4176 if (!(trace_flags & TRACE_ITER_VERBOSE))
4177 print_lat_help_header(m);
4178 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004179 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4180 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004181 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004182 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004183 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004184 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004185 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004186 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004187 }
4188}
4189
Steven Rostedte0a413f2011-09-29 21:26:16 -04004190static void test_ftrace_alive(struct seq_file *m)
4191{
4192 if (!ftrace_is_dead())
4193 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004194 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4195 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004196}
4197
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004198#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004199static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004200{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004201 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4202 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4203 "# Takes a snapshot of the main buffer.\n"
4204 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4205 "# (Doesn't have to be '2' works with any number that\n"
4206 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004207}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004208
4209static void show_snapshot_percpu_help(struct seq_file *m)
4210{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004211 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004212#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004213 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4214 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004215#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004216 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4217 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004218#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004219 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4220 "# (Doesn't have to be '2' works with any number that\n"
4221 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004222}
4223
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004224static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4225{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004226 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004227 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004228 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004229 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004230
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004231 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004232 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4233 show_snapshot_main_help(m);
4234 else
4235 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004236}
4237#else
4238/* Should never be called */
4239static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4240#endif
4241
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004242static int s_show(struct seq_file *m, void *v)
4243{
4244 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004245 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004246
4247 if (iter->ent == NULL) {
4248 if (iter->tr) {
4249 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4250 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004251 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004252 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004253 if (iter->snapshot && trace_empty(iter))
4254 print_snapshot_help(m, iter);
4255 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004256 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004257 else
4258 trace_default_header(m);
4259
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004260 } else if (iter->leftover) {
4261 /*
4262 * If we filled the seq_file buffer earlier, we
4263 * want to just show it now.
4264 */
4265 ret = trace_print_seq(m, &iter->seq);
4266
4267 /* ret should this time be zero, but you never know */
4268 iter->leftover = ret;
4269
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004270 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004271 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004272 ret = trace_print_seq(m, &iter->seq);
4273 /*
4274 * If we overflow the seq_file buffer, then it will
4275 * ask us for this data again at start up.
4276 * Use that instead.
4277 * ret is 0 if seq_file write succeeded.
4278 * -1 otherwise.
4279 */
4280 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004281 }
4282
4283 return 0;
4284}
4285
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004286/*
4287 * Should be used after trace_array_get(), trace_types_lock
4288 * ensures that i_cdev was already initialized.
4289 */
4290static inline int tracing_get_cpu(struct inode *inode)
4291{
4292 if (inode->i_cdev) /* See trace_create_cpu_file() */
4293 return (long)inode->i_cdev - 1;
4294 return RING_BUFFER_ALL_CPUS;
4295}
4296
James Morris88e9d342009-09-22 16:43:43 -07004297static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004298 .start = s_start,
4299 .next = s_next,
4300 .stop = s_stop,
4301 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004302};
4303
Ingo Molnare309b412008-05-12 21:20:51 +02004304static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004305__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004306{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004307 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004308 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004309 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004310
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004311 if (tracing_disabled)
4312 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004313
Jiri Olsa50e18b92012-04-25 10:23:39 +02004314 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004315 if (!iter)
4316 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004317
Gil Fruchter72917232015-06-09 10:32:35 +03004318 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004319 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004320 if (!iter->buffer_iter)
4321 goto release;
4322
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004323 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004324 * trace_find_next_entry() may need to save off iter->ent.
4325 * It will place it into the iter->temp buffer. As most
4326 * events are less than 128, allocate a buffer of that size.
4327 * If one is greater, then trace_find_next_entry() will
4328 * allocate a new buffer to adjust for the bigger iter->ent.
4329 * It's not critical if it fails to get allocated here.
4330 */
4331 iter->temp = kmalloc(128, GFP_KERNEL);
4332 if (iter->temp)
4333 iter->temp_size = 128;
4334
4335 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004336 * We make a copy of the current tracer to avoid concurrent
4337 * changes on it while we are reading.
4338 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004339 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004340 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004341 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004342 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004343
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004344 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004345
Li Zefan79f55992009-06-15 14:58:26 +08004346 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004347 goto fail;
4348
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004349 iter->tr = tr;
4350
4351#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004352 /* Currently only the top directory has a snapshot */
4353 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004354 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004355 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004356#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004357 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004358 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004359 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004360 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004361 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004362
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004363 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004364 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004365 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004366
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004367 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004368 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004369 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4370
David Sharp8be07092012-11-13 12:18:22 -08004371 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004372 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004373 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4374
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004375 /*
4376 * If pause-on-trace is enabled, then stop the trace while
4377 * dumping, unless this is the "snapshot" file
4378 */
4379 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004380 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004381
Steven Rostedtae3b5092013-01-23 15:22:59 -05004382 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004383 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004384 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004385 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004386 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004387 }
4388 ring_buffer_read_prepare_sync();
4389 for_each_tracing_cpu(cpu) {
4390 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004391 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004392 }
4393 } else {
4394 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004395 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004396 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004397 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004398 ring_buffer_read_prepare_sync();
4399 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004400 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004401 }
4402
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004403 mutex_unlock(&trace_types_lock);
4404
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004405 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004406
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004407 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004408 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004409 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004410 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004411 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004412release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004413 seq_release_private(inode, file);
4414 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004415}
4416
4417int tracing_open_generic(struct inode *inode, struct file *filp)
4418{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004419 int ret;
4420
4421 ret = tracing_check_open_get_tr(NULL);
4422 if (ret)
4423 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004424
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004425 filp->private_data = inode->i_private;
4426 return 0;
4427}
4428
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004429bool tracing_is_disabled(void)
4430{
4431 return (tracing_disabled) ? true: false;
4432}
4433
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004434/*
4435 * Open and update trace_array ref count.
4436 * Must have the current trace_array passed to it.
4437 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004438int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004439{
4440 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004441 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004442
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004443 ret = tracing_check_open_get_tr(tr);
4444 if (ret)
4445 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004446
4447 filp->private_data = inode->i_private;
4448
4449 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004450}
4451
Hannes Eder4fd27352009-02-10 19:44:12 +01004452static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004453{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004454 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004455 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004456 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004457 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004458
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004459 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004460 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004461 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004462 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004463
Oleg Nesterov6484c712013-07-23 17:26:10 +02004464 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004465 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004466 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004467
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004468 for_each_tracing_cpu(cpu) {
4469 if (iter->buffer_iter[cpu])
4470 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4471 }
4472
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004473 if (iter->trace && iter->trace->close)
4474 iter->trace->close(iter);
4475
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004476 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004477 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004478 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004479
4480 __trace_array_put(tr);
4481
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004482 mutex_unlock(&trace_types_lock);
4483
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004484 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004485 free_cpumask_var(iter->started);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004486 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004487 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004488 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004489 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004490
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004491 return 0;
4492}
4493
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004494static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4495{
4496 struct trace_array *tr = inode->i_private;
4497
4498 trace_array_put(tr);
4499 return 0;
4500}
4501
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004502static int tracing_single_release_tr(struct inode *inode, struct file *file)
4503{
4504 struct trace_array *tr = inode->i_private;
4505
4506 trace_array_put(tr);
4507
4508 return single_release(inode, file);
4509}
4510
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004511static int tracing_open(struct inode *inode, struct file *file)
4512{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004513 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004514 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004515 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004516
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004517 ret = tracing_check_open_get_tr(tr);
4518 if (ret)
4519 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004520
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004521 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004522 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4523 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004524 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004525
4526#ifdef CONFIG_TRACER_MAX_TRACE
4527 if (tr->current_trace->print_max)
4528 trace_buf = &tr->max_buffer;
4529#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004530
4531 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004532 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004533 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004534 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004535 }
4536
4537 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004538 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004539 if (IS_ERR(iter))
4540 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004541 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004542 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4543 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004544
4545 if (ret < 0)
4546 trace_array_put(tr);
4547
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004548 return ret;
4549}
4550
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004551/*
4552 * Some tracers are not suitable for instance buffers.
4553 * A tracer is always available for the global array (toplevel)
4554 * or if it explicitly states that it is.
4555 */
4556static bool
4557trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4558{
4559 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4560}
4561
4562/* Find the next tracer that this trace array may use */
4563static struct tracer *
4564get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4565{
4566 while (t && !trace_ok_for_array(t, tr))
4567 t = t->next;
4568
4569 return t;
4570}
4571
Ingo Molnare309b412008-05-12 21:20:51 +02004572static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004573t_next(struct seq_file *m, void *v, loff_t *pos)
4574{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004575 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004576 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004577
4578 (*pos)++;
4579
4580 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004581 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004582
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004583 return t;
4584}
4585
4586static void *t_start(struct seq_file *m, loff_t *pos)
4587{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004588 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004589 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004590 loff_t l = 0;
4591
4592 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004593
4594 t = get_tracer_for_array(tr, trace_types);
4595 for (; t && l < *pos; t = t_next(m, t, &l))
4596 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004597
4598 return t;
4599}
4600
4601static void t_stop(struct seq_file *m, void *p)
4602{
4603 mutex_unlock(&trace_types_lock);
4604}
4605
4606static int t_show(struct seq_file *m, void *v)
4607{
4608 struct tracer *t = v;
4609
4610 if (!t)
4611 return 0;
4612
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004613 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004614 if (t->next)
4615 seq_putc(m, ' ');
4616 else
4617 seq_putc(m, '\n');
4618
4619 return 0;
4620}
4621
James Morris88e9d342009-09-22 16:43:43 -07004622static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004623 .start = t_start,
4624 .next = t_next,
4625 .stop = t_stop,
4626 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004627};
4628
4629static int show_traces_open(struct inode *inode, struct file *file)
4630{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004631 struct trace_array *tr = inode->i_private;
4632 struct seq_file *m;
4633 int ret;
4634
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004635 ret = tracing_check_open_get_tr(tr);
4636 if (ret)
4637 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004638
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004639 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004640 if (ret) {
4641 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004642 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004643 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004644
4645 m = file->private_data;
4646 m->private = tr;
4647
4648 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004649}
4650
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004651static int show_traces_release(struct inode *inode, struct file *file)
4652{
4653 struct trace_array *tr = inode->i_private;
4654
4655 trace_array_put(tr);
4656 return seq_release(inode, file);
4657}
4658
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004659static ssize_t
4660tracing_write_stub(struct file *filp, const char __user *ubuf,
4661 size_t count, loff_t *ppos)
4662{
4663 return count;
4664}
4665
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004666loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004667{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004668 int ret;
4669
Slava Pestov364829b2010-11-24 15:13:16 -08004670 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004671 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004672 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004673 file->f_pos = ret = 0;
4674
4675 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004676}
4677
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004678static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004679 .open = tracing_open,
4680 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004681 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004682 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004683 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004684};
4685
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004686static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004687 .open = show_traces_open,
4688 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004689 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004690 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004691};
4692
4693static ssize_t
4694tracing_cpumask_read(struct file *filp, char __user *ubuf,
4695 size_t count, loff_t *ppos)
4696{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004697 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004698 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004699 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004700
Changbin Du90e406f2017-11-30 11:39:43 +08004701 len = snprintf(NULL, 0, "%*pb\n",
4702 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4703 mask_str = kmalloc(len, GFP_KERNEL);
4704 if (!mask_str)
4705 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004706
Changbin Du90e406f2017-11-30 11:39:43 +08004707 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004708 cpumask_pr_args(tr->tracing_cpumask));
4709 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004710 count = -EINVAL;
4711 goto out_err;
4712 }
Changbin Du90e406f2017-11-30 11:39:43 +08004713 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004714
4715out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004716 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004717
4718 return count;
4719}
4720
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004721int tracing_set_cpumask(struct trace_array *tr,
4722 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02004723{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004724 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304725
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004726 if (!tr)
4727 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004728
Steven Rostedta5e25882008-12-02 15:34:05 -05004729 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004730 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004731 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004732 /*
4733 * Increase/decrease the disabled counter if we are
4734 * about to flip a bit in the cpumask:
4735 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004736 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304737 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004738 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4739 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004740 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004741 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304742 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004743 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4744 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004745 }
4746 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004747 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004748 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004749
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004750 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004751
4752 return 0;
4753}
4754
4755static ssize_t
4756tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4757 size_t count, loff_t *ppos)
4758{
4759 struct trace_array *tr = file_inode(filp)->i_private;
4760 cpumask_var_t tracing_cpumask_new;
4761 int err;
4762
4763 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4764 return -ENOMEM;
4765
4766 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4767 if (err)
4768 goto err_free;
4769
4770 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4771 if (err)
4772 goto err_free;
4773
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304774 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004775
Ingo Molnarc7078de2008-05-12 21:20:52 +02004776 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004777
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004778err_free:
Li Zefan215368e2009-06-15 10:56:42 +08004779 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004780
4781 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004782}
4783
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004784static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004785 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004786 .read = tracing_cpumask_read,
4787 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004788 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004789 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004790};
4791
Li Zefanfdb372e2009-12-08 11:15:59 +08004792static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004793{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004794 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004795 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004796 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004797 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004798
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004799 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004800 tracer_flags = tr->current_trace->flags->val;
4801 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004802
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004803 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004804 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004805 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004806 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004807 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004808 }
4809
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004810 for (i = 0; trace_opts[i].name; i++) {
4811 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004812 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004813 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004814 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004815 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004816 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004817
Li Zefanfdb372e2009-12-08 11:15:59 +08004818 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004819}
4820
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004821static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004822 struct tracer_flags *tracer_flags,
4823 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004824{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004825 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004826 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004827
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004828 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004829 if (ret)
4830 return ret;
4831
4832 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004833 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004834 else
Zhaolei77708412009-08-07 18:53:21 +08004835 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004836 return 0;
4837}
4838
Li Zefan8d18eaa2009-12-08 11:17:06 +08004839/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004840static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004841{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004842 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004843 struct tracer_flags *tracer_flags = trace->flags;
4844 struct tracer_opt *opts = NULL;
4845 int i;
4846
4847 for (i = 0; tracer_flags->opts[i].name; i++) {
4848 opts = &tracer_flags->opts[i];
4849
4850 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004851 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004852 }
4853
4854 return -EINVAL;
4855}
4856
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004857/* Some tracers require overwrite to stay enabled */
4858int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4859{
4860 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4861 return -1;
4862
4863 return 0;
4864}
4865
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004866int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004867{
Prateek Sood3a53acf2019-12-10 09:15:16 +00004868 if ((mask == TRACE_ITER_RECORD_TGID) ||
4869 (mask == TRACE_ITER_RECORD_CMD))
4870 lockdep_assert_held(&event_mutex);
4871
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004872 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004873 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004874 return 0;
4875
4876 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004877 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004878 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004879 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004880
4881 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004882 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004883 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004884 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004885
4886 if (mask == TRACE_ITER_RECORD_CMD)
4887 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004888
Joel Fernandesd914ba32017-06-26 19:01:55 -07004889 if (mask == TRACE_ITER_RECORD_TGID) {
4890 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004891 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004892 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004893 GFP_KERNEL);
4894 if (!tgid_map) {
4895 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4896 return -ENOMEM;
4897 }
4898
4899 trace_event_enable_tgid_record(enabled);
4900 }
4901
Steven Rostedtc37775d2016-04-13 16:59:18 -04004902 if (mask == TRACE_ITER_EVENT_FORK)
4903 trace_event_follow_fork(tr, enabled);
4904
Namhyung Kim1e104862017-04-17 11:44:28 +09004905 if (mask == TRACE_ITER_FUNC_FORK)
4906 ftrace_pid_follow_fork(tr, enabled);
4907
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004908 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004909 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004910#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004911 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004912#endif
4913 }
Steven Rostedt81698832012-10-11 10:15:05 -04004914
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004915 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004916 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004917 trace_printk_control(enabled);
4918 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004919
4920 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004921}
4922
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09004923int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004924{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004925 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004926 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004927 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004928 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004929 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004930
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004931 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004932
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004933 len = str_has_prefix(cmp, "no");
4934 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004935 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004936
4937 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004938
Prateek Sood3a53acf2019-12-10 09:15:16 +00004939 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004940 mutex_lock(&trace_types_lock);
4941
Yisheng Xie591a0332018-05-17 16:36:03 +08004942 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004943 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004944 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004945 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004946 else
4947 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004948
4949 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00004950 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004951
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004952 /*
4953 * If the first trailing whitespace is replaced with '\0' by strstrip,
4954 * turn it back into a space.
4955 */
4956 if (orig_len > strlen(option))
4957 option[strlen(option)] = ' ';
4958
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004959 return ret;
4960}
4961
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004962static void __init apply_trace_boot_options(void)
4963{
4964 char *buf = trace_boot_options_buf;
4965 char *option;
4966
4967 while (true) {
4968 option = strsep(&buf, ",");
4969
4970 if (!option)
4971 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004972
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004973 if (*option)
4974 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004975
4976 /* Put back the comma to allow this to be called again */
4977 if (buf)
4978 *(buf - 1) = ',';
4979 }
4980}
4981
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004982static ssize_t
4983tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4984 size_t cnt, loff_t *ppos)
4985{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004986 struct seq_file *m = filp->private_data;
4987 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004988 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004989 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004990
4991 if (cnt >= sizeof(buf))
4992 return -EINVAL;
4993
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004994 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004995 return -EFAULT;
4996
Steven Rostedta8dd2172013-01-09 20:54:17 -05004997 buf[cnt] = 0;
4998
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004999 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005000 if (ret < 0)
5001 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005002
Jiri Olsacf8517c2009-10-23 19:36:16 -04005003 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005004
5005 return cnt;
5006}
5007
Li Zefanfdb372e2009-12-08 11:15:59 +08005008static int tracing_trace_options_open(struct inode *inode, struct file *file)
5009{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005010 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005011 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005012
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005013 ret = tracing_check_open_get_tr(tr);
5014 if (ret)
5015 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005016
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005017 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5018 if (ret < 0)
5019 trace_array_put(tr);
5020
5021 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005022}
5023
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005024static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005025 .open = tracing_trace_options_open,
5026 .read = seq_read,
5027 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005028 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005029 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005030};
5031
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005032static const char readme_msg[] =
5033 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005034 "# echo 0 > tracing_on : quick way to disable tracing\n"
5035 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5036 " Important files:\n"
5037 " trace\t\t\t- The static contents of the buffer\n"
5038 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5039 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5040 " current_tracer\t- function and latency tracers\n"
5041 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005042 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005043 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5044 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5045 " trace_clock\t\t-change the clock used to order events\n"
5046 " local: Per cpu clock but may not be synced across CPUs\n"
5047 " global: Synced across CPUs but slows tracing down.\n"
5048 " counter: Not a clock, but just an increment\n"
5049 " uptime: Jiffy counter from time of boot\n"
5050 " perf: Same clock that perf events use\n"
5051#ifdef CONFIG_X86_64
5052 " x86-tsc: TSC cycle counter\n"
5053#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005054 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5055 " delta: Delta difference against a buffer-wide timestamp\n"
5056 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005057 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005058 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005059 " tracing_cpumask\t- Limit which CPUs to trace\n"
5060 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5061 "\t\t\t Remove sub-buffer with rmdir\n"
5062 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005063 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005064 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005065 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005066#ifdef CONFIG_DYNAMIC_FTRACE
5067 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005068 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5069 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005070 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005071 "\t modules: Can select a group via module\n"
5072 "\t Format: :mod:<module-name>\n"
5073 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5074 "\t triggers: a command to perform when function is hit\n"
5075 "\t Format: <function>:<trigger>[:count]\n"
5076 "\t trigger: traceon, traceoff\n"
5077 "\t\t enable_event:<system>:<event>\n"
5078 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005079#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005080 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005081#endif
5082#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005083 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005084#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005085 "\t\t dump\n"
5086 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005087 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5088 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5089 "\t The first one will disable tracing every time do_fault is hit\n"
5090 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5091 "\t The first time do trap is hit and it disables tracing, the\n"
5092 "\t counter will decrement to 2. If tracing is already disabled,\n"
5093 "\t the counter will not decrement. It only decrements when the\n"
5094 "\t trigger did work\n"
5095 "\t To remove trigger without count:\n"
5096 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5097 "\t To remove trigger with a count:\n"
5098 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005099 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005100 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5101 "\t modules: Can select a group via module command :mod:\n"
5102 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005103#endif /* CONFIG_DYNAMIC_FTRACE */
5104#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005105 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5106 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005107 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5108 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005109#endif
5110#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5111 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005112 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005113 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5114#endif
5115#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005116 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5117 "\t\t\t snapshot buffer. Read the contents for more\n"
5118 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005119#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005120#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005121 " stack_trace\t\t- Shows the max stack trace when active\n"
5122 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005123 "\t\t\t Write into this file to reset the max size (trigger a\n"
5124 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005125#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005126 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5127 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005128#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005129#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005130#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005131 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005132 "\t\t\t Write into this file to define/undefine new trace events.\n"
5133#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005134#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005135 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005136 "\t\t\t Write into this file to define/undefine new trace events.\n"
5137#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005138#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005139 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005140 "\t\t\t Write into this file to define/undefine new trace events.\n"
5141#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005142#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005143 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005144 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5145 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005146#ifdef CONFIG_HIST_TRIGGERS
5147 "\t s:[synthetic/]<event> <field> [<field>]\n"
5148#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005149 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005150#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005151 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu4725cd82020-09-10 17:55:35 +09005152 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005153#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005154#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu3dd3aae2020-09-10 17:55:46 +09005155 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005156#endif
5157 "\t args: <name>=fetcharg[:type]\n"
5158 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005159#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005160 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005161#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005162 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005163#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005164 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005165 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005166 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005167 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005168#ifdef CONFIG_HIST_TRIGGERS
5169 "\t field: <stype> <name>;\n"
5170 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5171 "\t [unsigned] char/int/long\n"
5172#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005173#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005174 " events/\t\t- Directory containing all trace event subsystems:\n"
5175 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5176 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005177 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5178 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005179 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005180 " events/<system>/<event>/\t- Directory containing control files for\n"
5181 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005182 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5183 " filter\t\t- If set, only events passing filter are traced\n"
5184 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005185 "\t Format: <trigger>[:count][if <filter>]\n"
5186 "\t trigger: traceon, traceoff\n"
5187 "\t enable_event:<system>:<event>\n"
5188 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005189#ifdef CONFIG_HIST_TRIGGERS
5190 "\t enable_hist:<system>:<event>\n"
5191 "\t disable_hist:<system>:<event>\n"
5192#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005193#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005194 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005195#endif
5196#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005197 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005198#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005199#ifdef CONFIG_HIST_TRIGGERS
5200 "\t\t hist (see below)\n"
5201#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005202 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5203 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5204 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5205 "\t events/block/block_unplug/trigger\n"
5206 "\t The first disables tracing every time block_unplug is hit.\n"
5207 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5208 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5209 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5210 "\t Like function triggers, the counter is only decremented if it\n"
5211 "\t enabled or disabled tracing.\n"
5212 "\t To remove a trigger without a count:\n"
5213 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5214 "\t To remove a trigger with a count:\n"
5215 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5216 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005217#ifdef CONFIG_HIST_TRIGGERS
5218 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005219 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005220 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005221 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005222 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005223 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005224 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005225 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005226 "\t [if <filter>]\n\n"
5227 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005228 "\t table using the key(s) and value(s) named, and the value of a\n"
5229 "\t sum called 'hitcount' is incremented. Keys and values\n"
5230 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005231 "\t can be any field, or the special string 'stacktrace'.\n"
5232 "\t Compound keys consisting of up to two fields can be specified\n"
5233 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5234 "\t fields. Sort keys consisting of up to two fields can be\n"
5235 "\t specified using the 'sort' keyword. The sort direction can\n"
5236 "\t be modified by appending '.descending' or '.ascending' to a\n"
5237 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005238 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5239 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5240 "\t its histogram data will be shared with other triggers of the\n"
5241 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005242 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005243 "\t table in its entirety to stdout. If there are multiple hist\n"
5244 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005245 "\t trigger in the output. The table displayed for a named\n"
5246 "\t trigger will be the same as any other instance having the\n"
5247 "\t same name. The default format used to display a given field\n"
5248 "\t can be modified by appending any of the following modifiers\n"
5249 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005250 "\t .hex display a number as a hex value\n"
5251 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005252 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005253 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005254 "\t .syscall display a syscall id as a syscall name\n"
5255 "\t .log2 display log2 value rather than raw number\n"
5256 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005257 "\t The 'pause' parameter can be used to pause an existing hist\n"
5258 "\t trigger or to start a hist trigger but not log any events\n"
5259 "\t until told to do so. 'continue' can be used to start or\n"
5260 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005261 "\t The 'clear' parameter will clear the contents of a running\n"
5262 "\t hist trigger and leave its current paused/active state\n"
5263 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005264 "\t The enable_hist and disable_hist triggers can be used to\n"
5265 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005266 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005267 "\t the enable_event and disable_event triggers.\n\n"
5268 "\t Hist trigger handlers and actions are executed whenever a\n"
5269 "\t a histogram entry is added or updated. They take the form:\n\n"
5270 "\t <handler>.<action>\n\n"
5271 "\t The available handlers are:\n\n"
5272 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005273 "\t onmax(var) - invoke if var exceeds current max\n"
5274 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005275 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005276 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005277 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005278#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussi1bc36bd2020-10-04 17:14:07 -05005279 "\t snapshot() - snapshot the trace buffer\n\n"
5280#endif
5281#ifdef CONFIG_SYNTH_EVENTS
5282 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5283 "\t Write into this file to define/undefine new synthetic events.\n"
5284 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005285#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005286#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005287;
5288
5289static ssize_t
5290tracing_readme_read(struct file *filp, char __user *ubuf,
5291 size_t cnt, loff_t *ppos)
5292{
5293 return simple_read_from_buffer(ubuf, cnt, ppos,
5294 readme_msg, strlen(readme_msg));
5295}
5296
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005297static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005298 .open = tracing_open_generic,
5299 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005300 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005301};
5302
Michael Sartain99c621d2017-07-05 22:07:15 -06005303static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5304{
5305 int *ptr = v;
5306
5307 if (*pos || m->count)
5308 ptr++;
5309
5310 (*pos)++;
5311
5312 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5313 if (trace_find_tgid(*ptr))
5314 return ptr;
5315 }
5316
5317 return NULL;
5318}
5319
5320static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5321{
5322 void *v;
5323 loff_t l = 0;
5324
5325 if (!tgid_map)
5326 return NULL;
5327
5328 v = &tgid_map[0];
5329 while (l <= *pos) {
5330 v = saved_tgids_next(m, v, &l);
5331 if (!v)
5332 return NULL;
5333 }
5334
5335 return v;
5336}
5337
5338static void saved_tgids_stop(struct seq_file *m, void *v)
5339{
5340}
5341
5342static int saved_tgids_show(struct seq_file *m, void *v)
5343{
5344 int pid = (int *)v - tgid_map;
5345
5346 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5347 return 0;
5348}
5349
5350static const struct seq_operations tracing_saved_tgids_seq_ops = {
5351 .start = saved_tgids_start,
5352 .stop = saved_tgids_stop,
5353 .next = saved_tgids_next,
5354 .show = saved_tgids_show,
5355};
5356
5357static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5358{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005359 int ret;
5360
5361 ret = tracing_check_open_get_tr(NULL);
5362 if (ret)
5363 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005364
5365 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5366}
5367
5368
5369static const struct file_operations tracing_saved_tgids_fops = {
5370 .open = tracing_saved_tgids_open,
5371 .read = seq_read,
5372 .llseek = seq_lseek,
5373 .release = seq_release,
5374};
5375
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005376static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005377{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005378 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005379
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005380 if (*pos || m->count)
5381 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005382
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005383 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005384
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005385 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5386 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005387 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005388 continue;
5389
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005390 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005391 }
5392
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005393 return NULL;
5394}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005395
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005396static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5397{
5398 void *v;
5399 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005400
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005401 preempt_disable();
5402 arch_spin_lock(&trace_cmdline_lock);
5403
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005404 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005405 while (l <= *pos) {
5406 v = saved_cmdlines_next(m, v, &l);
5407 if (!v)
5408 return NULL;
5409 }
5410
5411 return v;
5412}
5413
5414static void saved_cmdlines_stop(struct seq_file *m, void *v)
5415{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005416 arch_spin_unlock(&trace_cmdline_lock);
5417 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005418}
5419
5420static int saved_cmdlines_show(struct seq_file *m, void *v)
5421{
5422 char buf[TASK_COMM_LEN];
5423 unsigned int *pid = v;
5424
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005425 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005426 seq_printf(m, "%d %s\n", *pid, buf);
5427 return 0;
5428}
5429
5430static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5431 .start = saved_cmdlines_start,
5432 .next = saved_cmdlines_next,
5433 .stop = saved_cmdlines_stop,
5434 .show = saved_cmdlines_show,
5435};
5436
5437static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5438{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005439 int ret;
5440
5441 ret = tracing_check_open_get_tr(NULL);
5442 if (ret)
5443 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005444
5445 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005446}
5447
5448static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005449 .open = tracing_saved_cmdlines_open,
5450 .read = seq_read,
5451 .llseek = seq_lseek,
5452 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005453};
5454
5455static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005456tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5457 size_t cnt, loff_t *ppos)
5458{
5459 char buf[64];
5460 int r;
5461
5462 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005463 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005464 arch_spin_unlock(&trace_cmdline_lock);
5465
5466 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5467}
5468
5469static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5470{
5471 kfree(s->saved_cmdlines);
5472 kfree(s->map_cmdline_to_pid);
5473 kfree(s);
5474}
5475
5476static int tracing_resize_saved_cmdlines(unsigned int val)
5477{
5478 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5479
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005480 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005481 if (!s)
5482 return -ENOMEM;
5483
5484 if (allocate_cmdlines_buffer(val, s) < 0) {
5485 kfree(s);
5486 return -ENOMEM;
5487 }
5488
5489 arch_spin_lock(&trace_cmdline_lock);
5490 savedcmd_temp = savedcmd;
5491 savedcmd = s;
5492 arch_spin_unlock(&trace_cmdline_lock);
5493 free_saved_cmdlines_buffer(savedcmd_temp);
5494
5495 return 0;
5496}
5497
5498static ssize_t
5499tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5500 size_t cnt, loff_t *ppos)
5501{
5502 unsigned long val;
5503 int ret;
5504
5505 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5506 if (ret)
5507 return ret;
5508
5509 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5510 if (!val || val > PID_MAX_DEFAULT)
5511 return -EINVAL;
5512
5513 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5514 if (ret < 0)
5515 return ret;
5516
5517 *ppos += cnt;
5518
5519 return cnt;
5520}
5521
5522static const struct file_operations tracing_saved_cmdlines_size_fops = {
5523 .open = tracing_open_generic,
5524 .read = tracing_saved_cmdlines_size_read,
5525 .write = tracing_saved_cmdlines_size_write,
5526};
5527
Jeremy Linton681bec02017-05-31 16:56:53 -05005528#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005529static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005530update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005531{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005532 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005533 if (ptr->tail.next) {
5534 ptr = ptr->tail.next;
5535 /* Set ptr to the next real item (skip head) */
5536 ptr++;
5537 } else
5538 return NULL;
5539 }
5540 return ptr;
5541}
5542
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005543static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005544{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005545 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005546
5547 /*
5548 * Paranoid! If ptr points to end, we don't want to increment past it.
5549 * This really should never happen.
5550 */
Vasily Averin039958a2020-01-24 10:03:01 +03005551 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005552 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005553 if (WARN_ON_ONCE(!ptr))
5554 return NULL;
5555
5556 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005557 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005558
5559 return ptr;
5560}
5561
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005562static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005563{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005564 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005565 loff_t l = 0;
5566
Jeremy Linton1793ed92017-05-31 16:56:46 -05005567 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005568
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005569 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005570 if (v)
5571 v++;
5572
5573 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005574 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005575 }
5576
5577 return v;
5578}
5579
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005580static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005581{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005582 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005583}
5584
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005585static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005586{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005587 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005588
5589 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005590 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005591 ptr->map.system);
5592
5593 return 0;
5594}
5595
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005596static const struct seq_operations tracing_eval_map_seq_ops = {
5597 .start = eval_map_start,
5598 .next = eval_map_next,
5599 .stop = eval_map_stop,
5600 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005601};
5602
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005603static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005604{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005605 int ret;
5606
5607 ret = tracing_check_open_get_tr(NULL);
5608 if (ret)
5609 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005610
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005611 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005612}
5613
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005614static const struct file_operations tracing_eval_map_fops = {
5615 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005616 .read = seq_read,
5617 .llseek = seq_lseek,
5618 .release = seq_release,
5619};
5620
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005621static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005622trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005623{
5624 /* Return tail of array given the head */
5625 return ptr + ptr->head.length + 1;
5626}
5627
5628static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005629trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005630 int len)
5631{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005632 struct trace_eval_map **stop;
5633 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005634 union trace_eval_map_item *map_array;
5635 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005636
5637 stop = start + len;
5638
5639 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005640 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005641 * where the head holds the module and length of array, and the
5642 * tail holds a pointer to the next list.
5643 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005644 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005645 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005646 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005647 return;
5648 }
5649
Jeremy Linton1793ed92017-05-31 16:56:46 -05005650 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005651
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005652 if (!trace_eval_maps)
5653 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005654 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005655 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005656 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005657 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005658 if (!ptr->tail.next)
5659 break;
5660 ptr = ptr->tail.next;
5661
5662 }
5663 ptr->tail.next = map_array;
5664 }
5665 map_array->head.mod = mod;
5666 map_array->head.length = len;
5667 map_array++;
5668
5669 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5670 map_array->map = **map;
5671 map_array++;
5672 }
5673 memset(map_array, 0, sizeof(*map_array));
5674
Jeremy Linton1793ed92017-05-31 16:56:46 -05005675 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005676}
5677
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005678static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005679{
Jeremy Linton681bec02017-05-31 16:56:53 -05005680 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005681 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005682}
5683
Jeremy Linton681bec02017-05-31 16:56:53 -05005684#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005685static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5686static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005687 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005688#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005689
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005690static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005691 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005692{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005693 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005694
5695 if (len <= 0)
5696 return;
5697
5698 map = start;
5699
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005700 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005701
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005702 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005703}
5704
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005705static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005706tracing_set_trace_read(struct file *filp, char __user *ubuf,
5707 size_t cnt, loff_t *ppos)
5708{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005709 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005710 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005711 int r;
5712
5713 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005714 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005715 mutex_unlock(&trace_types_lock);
5716
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005717 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005718}
5719
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005720int tracer_init(struct tracer *t, struct trace_array *tr)
5721{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005722 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005723 return t->init(tr);
5724}
5725
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005726static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005727{
5728 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005729
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005730 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005731 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005732}
5733
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005734#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005735/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005736static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5737 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005738{
5739 int cpu, ret = 0;
5740
5741 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5742 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005743 ret = ring_buffer_resize(trace_buf->buffer,
5744 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005745 if (ret < 0)
5746 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005747 per_cpu_ptr(trace_buf->data, cpu)->entries =
5748 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005749 }
5750 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005751 ret = ring_buffer_resize(trace_buf->buffer,
5752 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005753 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005754 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5755 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005756 }
5757
5758 return ret;
5759}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005760#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005761
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005762static int __tracing_resize_ring_buffer(struct trace_array *tr,
5763 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005764{
5765 int ret;
5766
5767 /*
5768 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005769 * we use the size that was given, and we can forget about
5770 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005771 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005772 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005773
Steven Rostedtb382ede62012-10-10 21:44:34 -04005774 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005775 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005776 return 0;
5777
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005778 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005779 if (ret < 0)
5780 return ret;
5781
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005782#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005783 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5784 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005785 goto out;
5786
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005787 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005788 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005789 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5790 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005791 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005792 /*
5793 * AARGH! We are left with different
5794 * size max buffer!!!!
5795 * The max buffer is our "snapshot" buffer.
5796 * When a tracer needs a snapshot (one of the
5797 * latency tracers), it swaps the max buffer
5798 * with the saved snap shot. We succeeded to
5799 * update the size of the main buffer, but failed to
5800 * update the size of the max buffer. But when we tried
5801 * to reset the main buffer to the original size, we
5802 * failed there too. This is very unlikely to
5803 * happen, but if it does, warn and kill all
5804 * tracing.
5805 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005806 WARN_ON(1);
5807 tracing_disabled = 1;
5808 }
5809 return ret;
5810 }
5811
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005812 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005813 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005814 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005815 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005816
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005817 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005818#endif /* CONFIG_TRACER_MAX_TRACE */
5819
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005820 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005821 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005822 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005823 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005824
5825 return ret;
5826}
5827
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005828ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5829 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005830{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005831 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005832
5833 mutex_lock(&trace_types_lock);
5834
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005835 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5836 /* make sure, this cpu is enabled in the mask */
5837 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5838 ret = -EINVAL;
5839 goto out;
5840 }
5841 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005842
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005843 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005844 if (ret < 0)
5845 ret = -ENOMEM;
5846
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005847out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005848 mutex_unlock(&trace_types_lock);
5849
5850 return ret;
5851}
5852
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005853
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005854/**
5855 * tracing_update_buffers - used by tracing facility to expand ring buffers
5856 *
5857 * To save on memory when the tracing is never used on a system with it
5858 * configured in. The ring buffers are set to a minimum size. But once
5859 * a user starts to use the tracing facility, then they need to grow
5860 * to their default size.
5861 *
5862 * This function is to be called when a tracer is about to be used.
5863 */
5864int tracing_update_buffers(void)
5865{
5866 int ret = 0;
5867
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005868 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005869 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005870 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005871 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005872 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005873
5874 return ret;
5875}
5876
Steven Rostedt577b7852009-02-26 23:43:05 -05005877struct trace_option_dentry;
5878
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005879static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005880create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005881
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005882/*
5883 * Used to clear out the tracer before deletion of an instance.
5884 * Must have trace_types_lock held.
5885 */
5886static void tracing_set_nop(struct trace_array *tr)
5887{
5888 if (tr->current_trace == &nop_trace)
5889 return;
5890
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005891 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005892
5893 if (tr->current_trace->reset)
5894 tr->current_trace->reset(tr);
5895
5896 tr->current_trace = &nop_trace;
5897}
5898
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005899static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005900{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005901 /* Only enable if the directory has been created already. */
5902 if (!tr->dir)
5903 return;
5904
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005905 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005906}
5907
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005908int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005909{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005910 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005911#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005912 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005913#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005914 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005915
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005916 mutex_lock(&trace_types_lock);
5917
Steven Rostedt73c51622009-03-11 13:42:01 -04005918 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005919 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005920 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005921 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005922 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005923 ret = 0;
5924 }
5925
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005926 for (t = trace_types; t; t = t->next) {
5927 if (strcmp(t->name, buf) == 0)
5928 break;
5929 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005930 if (!t) {
5931 ret = -EINVAL;
5932 goto out;
5933 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005934 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005935 goto out;
5936
Tom Zanussia35873a2019-02-13 17:42:45 -06005937#ifdef CONFIG_TRACER_SNAPSHOT
5938 if (t->use_max_tr) {
5939 arch_spin_lock(&tr->max_lock);
5940 if (tr->cond_snapshot)
5941 ret = -EBUSY;
5942 arch_spin_unlock(&tr->max_lock);
5943 if (ret)
5944 goto out;
5945 }
5946#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005947 /* Some tracers won't work on kernel command line */
5948 if (system_state < SYSTEM_RUNNING && t->noboot) {
5949 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5950 t->name);
5951 goto out;
5952 }
5953
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005954 /* Some tracers are only allowed for the top level buffer */
5955 if (!trace_ok_for_array(t, tr)) {
5956 ret = -EINVAL;
5957 goto out;
5958 }
5959
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005960 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04005961 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005962 ret = -EBUSY;
5963 goto out;
5964 }
5965
Steven Rostedt9f029e82008-11-12 15:24:24 -05005966 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005967
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005968 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005969
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005970 if (tr->current_trace->reset)
5971 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005972
Paul E. McKenney74401722018-11-06 18:44:52 -08005973 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005974 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005975
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005976#ifdef CONFIG_TRACER_MAX_TRACE
5977 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005978
5979 if (had_max_tr && !t->use_max_tr) {
5980 /*
5981 * We need to make sure that the update_max_tr sees that
5982 * current_trace changed to nop_trace to keep it from
5983 * swapping the buffers after we resize it.
5984 * The update_max_tr is called from interrupts disabled
5985 * so a synchronized_sched() is sufficient.
5986 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005987 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005988 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005989 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005990#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005991
5992#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005993 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005994 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005995 if (ret < 0)
5996 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005997 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005998#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005999
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006000 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006001 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006002 if (ret)
6003 goto out;
6004 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006005
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006006 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006007 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006008 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006009 out:
6010 mutex_unlock(&trace_types_lock);
6011
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006012 return ret;
6013}
6014
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006015static ssize_t
6016tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6017 size_t cnt, loff_t *ppos)
6018{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006019 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006020 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006021 int i;
6022 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006023 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006024
Steven Rostedt60063a62008-10-28 10:44:24 -04006025 ret = cnt;
6026
Li Zefanee6c2c12009-09-18 14:06:47 +08006027 if (cnt > MAX_TRACER_SIZE)
6028 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006029
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006030 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006031 return -EFAULT;
6032
6033 buf[cnt] = 0;
6034
6035 /* strip ending whitespace. */
6036 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6037 buf[i] = 0;
6038
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006039 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006040 if (err)
6041 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006042
Jiri Olsacf8517c2009-10-23 19:36:16 -04006043 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006044
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006045 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006046}
6047
6048static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006049tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6050 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006051{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006052 char buf[64];
6053 int r;
6054
Steven Rostedtcffae432008-05-12 21:21:00 +02006055 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006056 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006057 if (r > sizeof(buf))
6058 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006059 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006060}
6061
6062static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006063tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6064 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006065{
Hannes Eder5e398412009-02-10 19:44:34 +01006066 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006067 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006068
Peter Huewe22fe9b52011-06-07 21:58:27 +02006069 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6070 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006071 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006072
6073 *ptr = val * 1000;
6074
6075 return cnt;
6076}
6077
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006078static ssize_t
6079tracing_thresh_read(struct file *filp, char __user *ubuf,
6080 size_t cnt, loff_t *ppos)
6081{
6082 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6083}
6084
6085static ssize_t
6086tracing_thresh_write(struct file *filp, const char __user *ubuf,
6087 size_t cnt, loff_t *ppos)
6088{
6089 struct trace_array *tr = filp->private_data;
6090 int ret;
6091
6092 mutex_lock(&trace_types_lock);
6093 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6094 if (ret < 0)
6095 goto out;
6096
6097 if (tr->current_trace->update_thresh) {
6098 ret = tr->current_trace->update_thresh(tr);
6099 if (ret < 0)
6100 goto out;
6101 }
6102
6103 ret = cnt;
6104out:
6105 mutex_unlock(&trace_types_lock);
6106
6107 return ret;
6108}
6109
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006110#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006111
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006112static ssize_t
6113tracing_max_lat_read(struct file *filp, char __user *ubuf,
6114 size_t cnt, loff_t *ppos)
6115{
6116 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6117}
6118
6119static ssize_t
6120tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6121 size_t cnt, loff_t *ppos)
6122{
6123 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6124}
6125
Chen Gange428abb2015-11-10 05:15:15 +08006126#endif
6127
Steven Rostedtb3806b42008-05-12 21:20:46 +02006128static int tracing_open_pipe(struct inode *inode, struct file *filp)
6129{
Oleg Nesterov15544202013-07-23 17:25:57 +02006130 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006131 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006132 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006133
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006134 ret = tracing_check_open_get_tr(tr);
6135 if (ret)
6136 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006137
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006138 mutex_lock(&trace_types_lock);
6139
Steven Rostedtb3806b42008-05-12 21:20:46 +02006140 /* create a buffer to store the information to pass to userspace */
6141 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006142 if (!iter) {
6143 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006144 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006145 goto out;
6146 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006147
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006148 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006149 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006150
6151 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6152 ret = -ENOMEM;
6153 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306154 }
6155
Steven Rostedta3097202008-11-07 22:36:02 -05006156 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306157 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006158
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006159 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006160 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6161
David Sharp8be07092012-11-13 12:18:22 -08006162 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006163 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006164 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6165
Oleg Nesterov15544202013-07-23 17:25:57 +02006166 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006167 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006168 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006169 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006170 filp->private_data = iter;
6171
Steven Rostedt107bad82008-05-12 21:21:01 +02006172 if (iter->trace->pipe_open)
6173 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006174
Arnd Bergmannb4447862010-07-07 23:40:11 +02006175 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006176
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006177 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006178out:
6179 mutex_unlock(&trace_types_lock);
6180 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006181
6182fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006183 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006184 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006185 mutex_unlock(&trace_types_lock);
6186 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006187}
6188
6189static int tracing_release_pipe(struct inode *inode, struct file *file)
6190{
6191 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006192 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006193
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006194 mutex_lock(&trace_types_lock);
6195
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006196 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006197
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006198 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006199 iter->trace->pipe_close(iter);
6200
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006201 mutex_unlock(&trace_types_lock);
6202
Rusty Russell44623442009-01-01 10:12:23 +10306203 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006204 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006205 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006206
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006207 trace_array_put(tr);
6208
Steven Rostedtb3806b42008-05-12 21:20:46 +02006209 return 0;
6210}
6211
Al Viro9dd95742017-07-03 00:42:43 -04006212static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006213trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006214{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006215 struct trace_array *tr = iter->tr;
6216
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006217 /* Iterators are static, they should be filled or empty */
6218 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006219 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006220
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006221 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006222 /*
6223 * Always select as readable when in blocking mode
6224 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006225 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006226 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006227 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006228 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006229}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006230
Al Viro9dd95742017-07-03 00:42:43 -04006231static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006232tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6233{
6234 struct trace_iterator *iter = filp->private_data;
6235
6236 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006237}
6238
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006239/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006240static int tracing_wait_pipe(struct file *filp)
6241{
6242 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006243 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006244
6245 while (trace_empty(iter)) {
6246
6247 if ((filp->f_flags & O_NONBLOCK)) {
6248 return -EAGAIN;
6249 }
6250
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006251 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006252 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006253 * We still block if tracing is disabled, but we have never
6254 * read anything. This allows a user to cat this file, and
6255 * then enable tracing. But after we have read something,
6256 * we give an EOF when tracing is again disabled.
6257 *
6258 * iter->pos will be 0 if we haven't read anything.
6259 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006260 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006261 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006262
6263 mutex_unlock(&iter->mutex);
6264
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006265 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006266
6267 mutex_lock(&iter->mutex);
6268
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006269 if (ret)
6270 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006271 }
6272
6273 return 1;
6274}
6275
Steven Rostedtb3806b42008-05-12 21:20:46 +02006276/*
6277 * Consumer reader.
6278 */
6279static ssize_t
6280tracing_read_pipe(struct file *filp, char __user *ubuf,
6281 size_t cnt, loff_t *ppos)
6282{
6283 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006284 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006285
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006286 /*
6287 * Avoid more than one consumer on a single file descriptor
6288 * This is just a matter of traces coherency, the ring buffer itself
6289 * is protected.
6290 */
6291 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006292
6293 /* return any leftover data */
6294 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6295 if (sret != -EBUSY)
6296 goto out;
6297
6298 trace_seq_init(&iter->seq);
6299
Steven Rostedt107bad82008-05-12 21:21:01 +02006300 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006301 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6302 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006303 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006304 }
6305
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006306waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006307 sret = tracing_wait_pipe(filp);
6308 if (sret <= 0)
6309 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006310
6311 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006312 if (trace_empty(iter)) {
6313 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006314 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006315 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006316
6317 if (cnt >= PAGE_SIZE)
6318 cnt = PAGE_SIZE - 1;
6319
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006320 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006321 memset(&iter->seq, 0,
6322 sizeof(struct trace_iterator) -
6323 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006324 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006325 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006326 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006327
Lai Jiangshan4f535962009-05-18 19:35:34 +08006328 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006329 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006330 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006331 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006332 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006333
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006334 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006335 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006336 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006337 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006338 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006339 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006340 if (ret != TRACE_TYPE_NO_CONSUME)
6341 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006342
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006343 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006344 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006345
6346 /*
6347 * Setting the full flag means we reached the trace_seq buffer
6348 * size and we should leave by partial output condition above.
6349 * One of the trace_seq_* functions is not used properly.
6350 */
6351 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6352 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006353 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006354 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006355 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006356
Steven Rostedtb3806b42008-05-12 21:20:46 +02006357 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006358 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006359 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006360 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006361
6362 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006363 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006364 * entries, go back to wait for more entries.
6365 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006366 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006367 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006368
Steven Rostedt107bad82008-05-12 21:21:01 +02006369out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006370 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006371
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006372 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006373}
6374
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006375static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6376 unsigned int idx)
6377{
6378 __free_page(spd->pages[idx]);
6379}
6380
Steven Rostedt34cd4992009-02-09 12:06:29 -05006381static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006382tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006383{
6384 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006385 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006386 int ret;
6387
6388 /* Seq buffer is page-sized, exactly what we need. */
6389 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006390 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006391 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006392
6393 if (trace_seq_has_overflowed(&iter->seq)) {
6394 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006395 break;
6396 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006397
6398 /*
6399 * This should not be hit, because it should only
6400 * be set if the iter->seq overflowed. But check it
6401 * anyway to be safe.
6402 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006403 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006404 iter->seq.seq.len = save_len;
6405 break;
6406 }
6407
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006408 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006409 if (rem < count) {
6410 rem = 0;
6411 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006412 break;
6413 }
6414
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006415 if (ret != TRACE_TYPE_NO_CONSUME)
6416 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006417 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006418 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006419 rem = 0;
6420 iter->ent = NULL;
6421 break;
6422 }
6423 }
6424
6425 return rem;
6426}
6427
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006428static ssize_t tracing_splice_read_pipe(struct file *filp,
6429 loff_t *ppos,
6430 struct pipe_inode_info *pipe,
6431 size_t len,
6432 unsigned int flags)
6433{
Jens Axboe35f3d142010-05-20 10:43:18 +02006434 struct page *pages_def[PIPE_DEF_BUFFERS];
6435 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006436 struct trace_iterator *iter = filp->private_data;
6437 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006438 .pages = pages_def,
6439 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006440 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006441 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006442 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006443 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006444 };
6445 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006446 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006447 unsigned int i;
6448
Jens Axboe35f3d142010-05-20 10:43:18 +02006449 if (splice_grow_spd(pipe, &spd))
6450 return -ENOMEM;
6451
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006452 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006453
6454 if (iter->trace->splice_read) {
6455 ret = iter->trace->splice_read(iter, filp,
6456 ppos, pipe, len, flags);
6457 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006458 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006459 }
6460
6461 ret = tracing_wait_pipe(filp);
6462 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006463 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006464
Jason Wessel955b61e2010-08-05 09:22:23 -05006465 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006466 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006467 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006468 }
6469
Lai Jiangshan4f535962009-05-18 19:35:34 +08006470 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006471 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006472
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006473 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006474 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006475 spd.pages[i] = alloc_page(GFP_KERNEL);
6476 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006477 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006478
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006479 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006480
6481 /* Copy the data into the page, so we can start over. */
6482 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006483 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006484 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006485 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006486 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006487 break;
6488 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006489 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006490 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006491
Steven Rostedtf9520752009-03-02 14:04:40 -05006492 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006493 }
6494
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006495 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006496 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006497 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006498
6499 spd.nr_pages = i;
6500
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006501 if (i)
6502 ret = splice_to_pipe(pipe, &spd);
6503 else
6504 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006505out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006506 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006507 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006508
Steven Rostedt34cd4992009-02-09 12:06:29 -05006509out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006510 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006511 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006512}
6513
Steven Rostedta98a3c32008-05-12 21:20:59 +02006514static ssize_t
6515tracing_entries_read(struct file *filp, char __user *ubuf,
6516 size_t cnt, loff_t *ppos)
6517{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006518 struct inode *inode = file_inode(filp);
6519 struct trace_array *tr = inode->i_private;
6520 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006521 char buf[64];
6522 int r = 0;
6523 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006524
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006525 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006526
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006527 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006528 int cpu, buf_size_same;
6529 unsigned long size;
6530
6531 size = 0;
6532 buf_size_same = 1;
6533 /* check if all cpu sizes are same */
6534 for_each_tracing_cpu(cpu) {
6535 /* fill in the size from first enabled cpu */
6536 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006537 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6538 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006539 buf_size_same = 0;
6540 break;
6541 }
6542 }
6543
6544 if (buf_size_same) {
6545 if (!ring_buffer_expanded)
6546 r = sprintf(buf, "%lu (expanded: %lu)\n",
6547 size >> 10,
6548 trace_buf_size >> 10);
6549 else
6550 r = sprintf(buf, "%lu\n", size >> 10);
6551 } else
6552 r = sprintf(buf, "X\n");
6553 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006554 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006555
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006556 mutex_unlock(&trace_types_lock);
6557
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006558 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6559 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006560}
6561
6562static ssize_t
6563tracing_entries_write(struct file *filp, const char __user *ubuf,
6564 size_t cnt, loff_t *ppos)
6565{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006566 struct inode *inode = file_inode(filp);
6567 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006568 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006569 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006570
Peter Huewe22fe9b52011-06-07 21:58:27 +02006571 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6572 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006573 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006574
6575 /* must have at least 1 entry */
6576 if (!val)
6577 return -EINVAL;
6578
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006579 /* value is in KB */
6580 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006581 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006582 if (ret < 0)
6583 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006584
Jiri Olsacf8517c2009-10-23 19:36:16 -04006585 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006586
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006587 return cnt;
6588}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006589
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006590static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006591tracing_total_entries_read(struct file *filp, char __user *ubuf,
6592 size_t cnt, loff_t *ppos)
6593{
6594 struct trace_array *tr = filp->private_data;
6595 char buf[64];
6596 int r, cpu;
6597 unsigned long size = 0, expanded_size = 0;
6598
6599 mutex_lock(&trace_types_lock);
6600 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006601 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006602 if (!ring_buffer_expanded)
6603 expanded_size += trace_buf_size >> 10;
6604 }
6605 if (ring_buffer_expanded)
6606 r = sprintf(buf, "%lu\n", size);
6607 else
6608 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6609 mutex_unlock(&trace_types_lock);
6610
6611 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6612}
6613
6614static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006615tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6616 size_t cnt, loff_t *ppos)
6617{
6618 /*
6619 * There is no need to read what the user has written, this function
6620 * is just to make sure that there is no error when "echo" is used
6621 */
6622
6623 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006624
6625 return cnt;
6626}
6627
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006628static int
6629tracing_free_buffer_release(struct inode *inode, struct file *filp)
6630{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006631 struct trace_array *tr = inode->i_private;
6632
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006633 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006634 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006635 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006636 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006637 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006638
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006639 trace_array_put(tr);
6640
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006641 return 0;
6642}
6643
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006644static ssize_t
6645tracing_mark_write(struct file *filp, const char __user *ubuf,
6646 size_t cnt, loff_t *fpos)
6647{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006648 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006649 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006650 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006651 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006652 struct print_entry *entry;
6653 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006654 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006655 int size;
6656 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006657
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006658/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006659#define FAULTED_STR "<faulted>"
6660#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006661
Steven Rostedtc76f0692008-11-07 22:36:02 -05006662 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006663 return -EINVAL;
6664
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006665 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006666 return -EINVAL;
6667
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006668 if (cnt > TRACE_BUF_SIZE)
6669 cnt = TRACE_BUF_SIZE;
6670
Steven Rostedtd696b582011-09-22 11:50:27 -04006671 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006672
Steven Rostedtd696b582011-09-22 11:50:27 -04006673 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006674 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6675
6676 /* If less than "<faulted>", then make sure we can still add that */
6677 if (cnt < FAULTED_SIZE)
6678 size += FAULTED_SIZE - cnt;
6679
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006680 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006681 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6682 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006683 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006684 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006685 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006686
6687 entry = ring_buffer_event_data(event);
6688 entry->ip = _THIS_IP_;
6689
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006690 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6691 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006692 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006693 cnt = FAULTED_SIZE;
6694 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006695 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006696 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006697
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006698 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6699 /* do not add \n before testing triggers, but add \0 */
6700 entry->buf[cnt] = '\0';
6701 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6702 }
6703
Steven Rostedtd696b582011-09-22 11:50:27 -04006704 if (entry->buf[cnt - 1] != '\n') {
6705 entry->buf[cnt] = '\n';
6706 entry->buf[cnt + 1] = '\0';
6707 } else
6708 entry->buf[cnt] = '\0';
6709
Tingwei Zhang458999c2020-10-05 10:13:15 +03006710 if (static_branch_unlikely(&trace_marker_exports_enabled))
6711 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006712 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006713
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006714 if (tt)
6715 event_triggers_post_call(tr->trace_marker_file, tt);
6716
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006717 if (written > 0)
6718 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006719
Steven Rostedtfa32e852016-07-06 15:25:08 -04006720 return written;
6721}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006722
Steven Rostedtfa32e852016-07-06 15:25:08 -04006723/* Limit it for now to 3K (including tag) */
6724#define RAW_DATA_MAX_SIZE (1024*3)
6725
6726static ssize_t
6727tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6728 size_t cnt, loff_t *fpos)
6729{
6730 struct trace_array *tr = filp->private_data;
6731 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006732 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006733 struct raw_data_entry *entry;
6734 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006735 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006736 int size;
6737 int len;
6738
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006739#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6740
Steven Rostedtfa32e852016-07-06 15:25:08 -04006741 if (tracing_disabled)
6742 return -EINVAL;
6743
6744 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6745 return -EINVAL;
6746
6747 /* The marker must at least have a tag id */
6748 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6749 return -EINVAL;
6750
6751 if (cnt > TRACE_BUF_SIZE)
6752 cnt = TRACE_BUF_SIZE;
6753
6754 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6755
Steven Rostedtfa32e852016-07-06 15:25:08 -04006756 local_save_flags(irq_flags);
6757 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006758 if (cnt < FAULT_SIZE_ID)
6759 size += FAULT_SIZE_ID - cnt;
6760
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006761 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006762 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6763 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006764 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006765 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006766 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006767
6768 entry = ring_buffer_event_data(event);
6769
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006770 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6771 if (len) {
6772 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006773 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006774 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006775 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006776 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006777
6778 __buffer_unlock_commit(buffer, event);
6779
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006780 if (written > 0)
6781 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006782
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006783 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006784}
6785
Li Zefan13f16d22009-12-08 11:16:11 +08006786static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006787{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006788 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006789 int i;
6790
6791 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006792 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006793 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006794 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6795 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006796 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006797
Li Zefan13f16d22009-12-08 11:16:11 +08006798 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006799}
6800
Tom Zanussid71bd342018-01-15 20:52:07 -06006801int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006802{
Zhaolei5079f322009-08-25 16:12:56 +08006803 int i;
6804
Zhaolei5079f322009-08-25 16:12:56 +08006805 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6806 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6807 break;
6808 }
6809 if (i == ARRAY_SIZE(trace_clocks))
6810 return -EINVAL;
6811
Zhaolei5079f322009-08-25 16:12:56 +08006812 mutex_lock(&trace_types_lock);
6813
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006814 tr->clock_id = i;
6815
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006816 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006817
David Sharp60303ed2012-10-11 16:27:52 -07006818 /*
6819 * New clock may not be consistent with the previous clock.
6820 * Reset the buffer so that it doesn't have incomparable timestamps.
6821 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006822 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006823
6824#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006825 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006826 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006827 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006828#endif
David Sharp60303ed2012-10-11 16:27:52 -07006829
Zhaolei5079f322009-08-25 16:12:56 +08006830 mutex_unlock(&trace_types_lock);
6831
Steven Rostedte1e232c2014-02-10 23:38:46 -05006832 return 0;
6833}
6834
6835static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6836 size_t cnt, loff_t *fpos)
6837{
6838 struct seq_file *m = filp->private_data;
6839 struct trace_array *tr = m->private;
6840 char buf[64];
6841 const char *clockstr;
6842 int ret;
6843
6844 if (cnt >= sizeof(buf))
6845 return -EINVAL;
6846
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006847 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006848 return -EFAULT;
6849
6850 buf[cnt] = 0;
6851
6852 clockstr = strstrip(buf);
6853
6854 ret = tracing_set_clock(tr, clockstr);
6855 if (ret)
6856 return ret;
6857
Zhaolei5079f322009-08-25 16:12:56 +08006858 *fpos += cnt;
6859
6860 return cnt;
6861}
6862
Li Zefan13f16d22009-12-08 11:16:11 +08006863static int tracing_clock_open(struct inode *inode, struct file *file)
6864{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006865 struct trace_array *tr = inode->i_private;
6866 int ret;
6867
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006868 ret = tracing_check_open_get_tr(tr);
6869 if (ret)
6870 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006871
6872 ret = single_open(file, tracing_clock_show, inode->i_private);
6873 if (ret < 0)
6874 trace_array_put(tr);
6875
6876 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006877}
6878
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006879static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6880{
6881 struct trace_array *tr = m->private;
6882
6883 mutex_lock(&trace_types_lock);
6884
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006885 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006886 seq_puts(m, "delta [absolute]\n");
6887 else
6888 seq_puts(m, "[delta] absolute\n");
6889
6890 mutex_unlock(&trace_types_lock);
6891
6892 return 0;
6893}
6894
6895static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6896{
6897 struct trace_array *tr = inode->i_private;
6898 int ret;
6899
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006900 ret = tracing_check_open_get_tr(tr);
6901 if (ret)
6902 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006903
6904 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6905 if (ret < 0)
6906 trace_array_put(tr);
6907
6908 return ret;
6909}
6910
Tom Zanussi00b41452018-01-15 20:51:39 -06006911int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6912{
6913 int ret = 0;
6914
6915 mutex_lock(&trace_types_lock);
6916
6917 if (abs && tr->time_stamp_abs_ref++)
6918 goto out;
6919
6920 if (!abs) {
6921 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6922 ret = -EINVAL;
6923 goto out;
6924 }
6925
6926 if (--tr->time_stamp_abs_ref)
6927 goto out;
6928 }
6929
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006930 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
Tom Zanussi00b41452018-01-15 20:51:39 -06006931
6932#ifdef CONFIG_TRACER_MAX_TRACE
6933 if (tr->max_buffer.buffer)
6934 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6935#endif
6936 out:
6937 mutex_unlock(&trace_types_lock);
6938
6939 return ret;
6940}
6941
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006942struct ftrace_buffer_info {
6943 struct trace_iterator iter;
6944 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006945 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006946 unsigned int read;
6947};
6948
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006949#ifdef CONFIG_TRACER_SNAPSHOT
6950static int tracing_snapshot_open(struct inode *inode, struct file *file)
6951{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006952 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006953 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006954 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006955 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006956
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006957 ret = tracing_check_open_get_tr(tr);
6958 if (ret)
6959 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006960
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006961 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006962 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006963 if (IS_ERR(iter))
6964 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006965 } else {
6966 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006967 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006968 m = kzalloc(sizeof(*m), GFP_KERNEL);
6969 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006970 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006971 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6972 if (!iter) {
6973 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006974 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006975 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006976 ret = 0;
6977
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006978 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006979 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006980 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006981 m->private = iter;
6982 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006983 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006984out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006985 if (ret < 0)
6986 trace_array_put(tr);
6987
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006988 return ret;
6989}
6990
6991static ssize_t
6992tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6993 loff_t *ppos)
6994{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006995 struct seq_file *m = filp->private_data;
6996 struct trace_iterator *iter = m->private;
6997 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006998 unsigned long val;
6999 int ret;
7000
7001 ret = tracing_update_buffers();
7002 if (ret < 0)
7003 return ret;
7004
7005 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7006 if (ret)
7007 return ret;
7008
7009 mutex_lock(&trace_types_lock);
7010
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007011 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007012 ret = -EBUSY;
7013 goto out;
7014 }
7015
Tom Zanussia35873a2019-02-13 17:42:45 -06007016 arch_spin_lock(&tr->max_lock);
7017 if (tr->cond_snapshot)
7018 ret = -EBUSY;
7019 arch_spin_unlock(&tr->max_lock);
7020 if (ret)
7021 goto out;
7022
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007023 switch (val) {
7024 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007025 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7026 ret = -EINVAL;
7027 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007028 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007029 if (tr->allocated_snapshot)
7030 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007031 break;
7032 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007033/* Only allow per-cpu swap if the ring buffer supports it */
7034#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7035 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7036 ret = -EINVAL;
7037 break;
7038 }
7039#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007040 if (tr->allocated_snapshot)
7041 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007042 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007043 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007044 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007045 if (ret < 0)
7046 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007047 local_irq_disable();
7048 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007049 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007050 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007051 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007052 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007053 local_irq_enable();
7054 break;
7055 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007056 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007057 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7058 tracing_reset_online_cpus(&tr->max_buffer);
7059 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007060 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007061 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007062 break;
7063 }
7064
7065 if (ret >= 0) {
7066 *ppos += cnt;
7067 ret = cnt;
7068 }
7069out:
7070 mutex_unlock(&trace_types_lock);
7071 return ret;
7072}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007073
7074static int tracing_snapshot_release(struct inode *inode, struct file *file)
7075{
7076 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007077 int ret;
7078
7079 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007080
7081 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007082 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007083
7084 /* If write only, the seq_file is just a stub */
7085 if (m)
7086 kfree(m->private);
7087 kfree(m);
7088
7089 return 0;
7090}
7091
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007092static int tracing_buffers_open(struct inode *inode, struct file *filp);
7093static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7094 size_t count, loff_t *ppos);
7095static int tracing_buffers_release(struct inode *inode, struct file *file);
7096static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7097 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7098
7099static int snapshot_raw_open(struct inode *inode, struct file *filp)
7100{
7101 struct ftrace_buffer_info *info;
7102 int ret;
7103
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007104 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007105 ret = tracing_buffers_open(inode, filp);
7106 if (ret < 0)
7107 return ret;
7108
7109 info = filp->private_data;
7110
7111 if (info->iter.trace->use_max_tr) {
7112 tracing_buffers_release(inode, filp);
7113 return -EBUSY;
7114 }
7115
7116 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007117 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007118
7119 return ret;
7120}
7121
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007122#endif /* CONFIG_TRACER_SNAPSHOT */
7123
7124
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007125static const struct file_operations tracing_thresh_fops = {
7126 .open = tracing_open_generic,
7127 .read = tracing_thresh_read,
7128 .write = tracing_thresh_write,
7129 .llseek = generic_file_llseek,
7130};
7131
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007132#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007133static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007134 .open = tracing_open_generic,
7135 .read = tracing_max_lat_read,
7136 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007137 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007138};
Chen Gange428abb2015-11-10 05:15:15 +08007139#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007140
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007141static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007142 .open = tracing_open_generic,
7143 .read = tracing_set_trace_read,
7144 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007145 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007146};
7147
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007148static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007149 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007150 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007151 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007152 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007153 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007154 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007155};
7156
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007157static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007158 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007159 .read = tracing_entries_read,
7160 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007161 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007162 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007163};
7164
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007165static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007166 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007167 .read = tracing_total_entries_read,
7168 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007169 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007170};
7171
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007172static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007173 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007174 .write = tracing_free_buffer_write,
7175 .release = tracing_free_buffer_release,
7176};
7177
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007178static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007179 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007180 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007181 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007182 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007183};
7184
Steven Rostedtfa32e852016-07-06 15:25:08 -04007185static const struct file_operations tracing_mark_raw_fops = {
7186 .open = tracing_open_generic_tr,
7187 .write = tracing_mark_raw_write,
7188 .llseek = generic_file_llseek,
7189 .release = tracing_release_generic_tr,
7190};
7191
Zhaolei5079f322009-08-25 16:12:56 +08007192static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007193 .open = tracing_clock_open,
7194 .read = seq_read,
7195 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007196 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007197 .write = tracing_clock_write,
7198};
7199
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007200static const struct file_operations trace_time_stamp_mode_fops = {
7201 .open = tracing_time_stamp_mode_open,
7202 .read = seq_read,
7203 .llseek = seq_lseek,
7204 .release = tracing_single_release_tr,
7205};
7206
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007207#ifdef CONFIG_TRACER_SNAPSHOT
7208static const struct file_operations snapshot_fops = {
7209 .open = tracing_snapshot_open,
7210 .read = seq_read,
7211 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007212 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007213 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007214};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007215
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007216static const struct file_operations snapshot_raw_fops = {
7217 .open = snapshot_raw_open,
7218 .read = tracing_buffers_read,
7219 .release = tracing_buffers_release,
7220 .splice_read = tracing_buffers_splice_read,
7221 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007222};
7223
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007224#endif /* CONFIG_TRACER_SNAPSHOT */
7225
Tom Zanussi8a062902019-03-31 18:48:15 -05007226#define TRACING_LOG_ERRS_MAX 8
7227#define TRACING_LOG_LOC_MAX 128
7228
7229#define CMD_PREFIX " Command: "
7230
7231struct err_info {
7232 const char **errs; /* ptr to loc-specific array of err strings */
7233 u8 type; /* index into errs -> specific err string */
7234 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7235 u64 ts;
7236};
7237
7238struct tracing_log_err {
7239 struct list_head list;
7240 struct err_info info;
7241 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7242 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7243};
7244
Tom Zanussi8a062902019-03-31 18:48:15 -05007245static DEFINE_MUTEX(tracing_err_log_lock);
7246
YueHaibingff585c52019-06-14 23:32:10 +08007247static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007248{
7249 struct tracing_log_err *err;
7250
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007251 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007252 err = kzalloc(sizeof(*err), GFP_KERNEL);
7253 if (!err)
7254 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007255 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007256
7257 return err;
7258 }
7259
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007260 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007261 list_del(&err->list);
7262
7263 return err;
7264}
7265
7266/**
7267 * err_pos - find the position of a string within a command for error careting
7268 * @cmd: The tracing command that caused the error
7269 * @str: The string to position the caret at within @cmd
7270 *
7271 * Finds the position of the first occurence of @str within @cmd. The
7272 * return value can be passed to tracing_log_err() for caret placement
7273 * within @cmd.
7274 *
7275 * Returns the index within @cmd of the first occurence of @str or 0
7276 * if @str was not found.
7277 */
7278unsigned int err_pos(char *cmd, const char *str)
7279{
7280 char *found;
7281
7282 if (WARN_ON(!strlen(cmd)))
7283 return 0;
7284
7285 found = strstr(cmd, str);
7286 if (found)
7287 return found - cmd;
7288
7289 return 0;
7290}
7291
7292/**
7293 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007294 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007295 * @loc: A string describing where the error occurred
7296 * @cmd: The tracing command that caused the error
7297 * @errs: The array of loc-specific static error strings
7298 * @type: The index into errs[], which produces the specific static err string
7299 * @pos: The position the caret should be placed in the cmd
7300 *
7301 * Writes an error into tracing/error_log of the form:
7302 *
7303 * <loc>: error: <text>
7304 * Command: <cmd>
7305 * ^
7306 *
7307 * tracing/error_log is a small log file containing the last
7308 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7309 * unless there has been a tracing error, and the error log can be
7310 * cleared and have its memory freed by writing the empty string in
7311 * truncation mode to it i.e. echo > tracing/error_log.
7312 *
7313 * NOTE: the @errs array along with the @type param are used to
7314 * produce a static error string - this string is not copied and saved
7315 * when the error is logged - only a pointer to it is saved. See
7316 * existing callers for examples of how static strings are typically
7317 * defined for use with tracing_log_err().
7318 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007319void tracing_log_err(struct trace_array *tr,
7320 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007321 const char **errs, u8 type, u8 pos)
7322{
7323 struct tracing_log_err *err;
7324
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007325 if (!tr)
7326 tr = &global_trace;
7327
Tom Zanussi8a062902019-03-31 18:48:15 -05007328 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007329 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007330 if (PTR_ERR(err) == -ENOMEM) {
7331 mutex_unlock(&tracing_err_log_lock);
7332 return;
7333 }
7334
7335 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7336 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7337
7338 err->info.errs = errs;
7339 err->info.type = type;
7340 err->info.pos = pos;
7341 err->info.ts = local_clock();
7342
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007343 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007344 mutex_unlock(&tracing_err_log_lock);
7345}
7346
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007347static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007348{
7349 struct tracing_log_err *err, *next;
7350
7351 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007352 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007353 list_del(&err->list);
7354 kfree(err);
7355 }
7356
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007357 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007358 mutex_unlock(&tracing_err_log_lock);
7359}
7360
7361static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7362{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007363 struct trace_array *tr = m->private;
7364
Tom Zanussi8a062902019-03-31 18:48:15 -05007365 mutex_lock(&tracing_err_log_lock);
7366
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007367 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007368}
7369
7370static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7371{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007372 struct trace_array *tr = m->private;
7373
7374 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007375}
7376
7377static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7378{
7379 mutex_unlock(&tracing_err_log_lock);
7380}
7381
7382static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7383{
7384 u8 i;
7385
7386 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7387 seq_putc(m, ' ');
7388 for (i = 0; i < pos; i++)
7389 seq_putc(m, ' ');
7390 seq_puts(m, "^\n");
7391}
7392
7393static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7394{
7395 struct tracing_log_err *err = v;
7396
7397 if (err) {
7398 const char *err_text = err->info.errs[err->info.type];
7399 u64 sec = err->info.ts;
7400 u32 nsec;
7401
7402 nsec = do_div(sec, NSEC_PER_SEC);
7403 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7404 err->loc, err_text);
7405 seq_printf(m, "%s", err->cmd);
7406 tracing_err_log_show_pos(m, err->info.pos);
7407 }
7408
7409 return 0;
7410}
7411
7412static const struct seq_operations tracing_err_log_seq_ops = {
7413 .start = tracing_err_log_seq_start,
7414 .next = tracing_err_log_seq_next,
7415 .stop = tracing_err_log_seq_stop,
7416 .show = tracing_err_log_seq_show
7417};
7418
7419static int tracing_err_log_open(struct inode *inode, struct file *file)
7420{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007421 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007422 int ret = 0;
7423
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007424 ret = tracing_check_open_get_tr(tr);
7425 if (ret)
7426 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007427
Tom Zanussi8a062902019-03-31 18:48:15 -05007428 /* If this file was opened for write, then erase contents */
7429 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007430 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007431
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007432 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007433 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007434 if (!ret) {
7435 struct seq_file *m = file->private_data;
7436 m->private = tr;
7437 } else {
7438 trace_array_put(tr);
7439 }
7440 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007441 return ret;
7442}
7443
7444static ssize_t tracing_err_log_write(struct file *file,
7445 const char __user *buffer,
7446 size_t count, loff_t *ppos)
7447{
7448 return count;
7449}
7450
Takeshi Misawad122ed62019-06-28 19:56:40 +09007451static int tracing_err_log_release(struct inode *inode, struct file *file)
7452{
7453 struct trace_array *tr = inode->i_private;
7454
7455 trace_array_put(tr);
7456
7457 if (file->f_mode & FMODE_READ)
7458 seq_release(inode, file);
7459
7460 return 0;
7461}
7462
Tom Zanussi8a062902019-03-31 18:48:15 -05007463static const struct file_operations tracing_err_log_fops = {
7464 .open = tracing_err_log_open,
7465 .write = tracing_err_log_write,
7466 .read = seq_read,
7467 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007468 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007469};
7470
Steven Rostedt2cadf912008-12-01 22:20:19 -05007471static int tracing_buffers_open(struct inode *inode, struct file *filp)
7472{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007473 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007474 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007475 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007476
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007477 ret = tracing_check_open_get_tr(tr);
7478 if (ret)
7479 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007480
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007481 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007482 if (!info) {
7483 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007484 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007485 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007486
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007487 mutex_lock(&trace_types_lock);
7488
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007489 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007490 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007491 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007492 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007493 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007494 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007495 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007496
7497 filp->private_data = info;
7498
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007499 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007500
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007501 mutex_unlock(&trace_types_lock);
7502
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007503 ret = nonseekable_open(inode, filp);
7504 if (ret < 0)
7505 trace_array_put(tr);
7506
7507 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007508}
7509
Al Viro9dd95742017-07-03 00:42:43 -04007510static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007511tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7512{
7513 struct ftrace_buffer_info *info = filp->private_data;
7514 struct trace_iterator *iter = &info->iter;
7515
7516 return trace_poll(iter, filp, poll_table);
7517}
7518
Steven Rostedt2cadf912008-12-01 22:20:19 -05007519static ssize_t
7520tracing_buffers_read(struct file *filp, char __user *ubuf,
7521 size_t count, loff_t *ppos)
7522{
7523 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007524 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007525 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007526 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007527
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007528 if (!count)
7529 return 0;
7530
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007531#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007532 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7533 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007534#endif
7535
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007536 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007537 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007538 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007539 if (IS_ERR(info->spare)) {
7540 ret = PTR_ERR(info->spare);
7541 info->spare = NULL;
7542 } else {
7543 info->spare_cpu = iter->cpu_file;
7544 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007545 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007546 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007547 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007548
Steven Rostedt2cadf912008-12-01 22:20:19 -05007549 /* Do we have previous read data to read? */
7550 if (info->read < PAGE_SIZE)
7551 goto read;
7552
Steven Rostedtb6273442013-02-28 13:44:11 -05007553 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007554 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007555 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007556 &info->spare,
7557 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007558 iter->cpu_file, 0);
7559 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007560
7561 if (ret < 0) {
7562 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007563 if ((filp->f_flags & O_NONBLOCK))
7564 return -EAGAIN;
7565
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007566 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007567 if (ret)
7568 return ret;
7569
Steven Rostedtb6273442013-02-28 13:44:11 -05007570 goto again;
7571 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007572 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007573 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007574
Steven Rostedt436fc282011-10-14 10:44:25 -04007575 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007576 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007577 size = PAGE_SIZE - info->read;
7578 if (size > count)
7579 size = count;
7580
7581 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007582 if (ret == size)
7583 return -EFAULT;
7584
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007585 size -= ret;
7586
Steven Rostedt2cadf912008-12-01 22:20:19 -05007587 *ppos += size;
7588 info->read += size;
7589
7590 return size;
7591}
7592
7593static int tracing_buffers_release(struct inode *inode, struct file *file)
7594{
7595 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007596 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007597
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007598 mutex_lock(&trace_types_lock);
7599
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007600 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007601
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007602 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007603
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007604 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007605 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007606 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007607 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007608
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007609 mutex_unlock(&trace_types_lock);
7610
Steven Rostedt2cadf912008-12-01 22:20:19 -05007611 return 0;
7612}
7613
7614struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007615 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007616 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007617 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007618 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007619};
7620
Jann Hornb9872222019-04-04 23:59:25 +02007621static void buffer_ref_release(struct buffer_ref *ref)
7622{
7623 if (!refcount_dec_and_test(&ref->refcount))
7624 return;
7625 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7626 kfree(ref);
7627}
7628
Steven Rostedt2cadf912008-12-01 22:20:19 -05007629static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7630 struct pipe_buffer *buf)
7631{
7632 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7633
Jann Hornb9872222019-04-04 23:59:25 +02007634 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007635 buf->private = 0;
7636}
7637
Matthew Wilcox15fab632019-04-05 14:02:10 -07007638static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007639 struct pipe_buffer *buf)
7640{
7641 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7642
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007643 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007644 return false;
7645
Jann Hornb9872222019-04-04 23:59:25 +02007646 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007647 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007648}
7649
7650/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007651static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007652 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007653 .get = buffer_pipe_buf_get,
7654};
7655
7656/*
7657 * Callback from splice_to_pipe(), if we need to release some pages
7658 * at the end of the spd in case we error'ed out in filling the pipe.
7659 */
7660static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7661{
7662 struct buffer_ref *ref =
7663 (struct buffer_ref *)spd->partial[i].private;
7664
Jann Hornb9872222019-04-04 23:59:25 +02007665 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007666 spd->partial[i].private = 0;
7667}
7668
7669static ssize_t
7670tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7671 struct pipe_inode_info *pipe, size_t len,
7672 unsigned int flags)
7673{
7674 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007675 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007676 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7677 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007678 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007679 .pages = pages_def,
7680 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007681 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007682 .ops = &buffer_pipe_buf_ops,
7683 .spd_release = buffer_spd_release,
7684 };
7685 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007686 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007687 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007688
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007689#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007690 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7691 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007692#endif
7693
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007694 if (*ppos & (PAGE_SIZE - 1))
7695 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007696
7697 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007698 if (len < PAGE_SIZE)
7699 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007700 len &= PAGE_MASK;
7701 }
7702
Al Viro1ae22932016-09-17 18:31:46 -04007703 if (splice_grow_spd(pipe, &spd))
7704 return -ENOMEM;
7705
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007706 again:
7707 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007708 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007709
Al Viroa786c062014-04-11 12:01:03 -04007710 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007711 struct page *page;
7712 int r;
7713
7714 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007715 if (!ref) {
7716 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007717 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007718 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007719
Jann Hornb9872222019-04-04 23:59:25 +02007720 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007721 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007722 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007723 if (IS_ERR(ref->page)) {
7724 ret = PTR_ERR(ref->page);
7725 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007726 kfree(ref);
7727 break;
7728 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007729 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007730
7731 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007732 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007733 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007734 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7735 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007736 kfree(ref);
7737 break;
7738 }
7739
Steven Rostedt2cadf912008-12-01 22:20:19 -05007740 page = virt_to_page(ref->page);
7741
7742 spd.pages[i] = page;
7743 spd.partial[i].len = PAGE_SIZE;
7744 spd.partial[i].offset = 0;
7745 spd.partial[i].private = (unsigned long)ref;
7746 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007747 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007748
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007749 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007750 }
7751
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007752 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007753 spd.nr_pages = i;
7754
7755 /* did we read anything? */
7756 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007757 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007758 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007759
Al Viro1ae22932016-09-17 18:31:46 -04007760 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007761 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007762 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007763
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007764 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007765 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007766 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007767
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007768 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007769 }
7770
7771 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007772out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007773 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007774
Steven Rostedt2cadf912008-12-01 22:20:19 -05007775 return ret;
7776}
7777
7778static const struct file_operations tracing_buffers_fops = {
7779 .open = tracing_buffers_open,
7780 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007781 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007782 .release = tracing_buffers_release,
7783 .splice_read = tracing_buffers_splice_read,
7784 .llseek = no_llseek,
7785};
7786
Steven Rostedtc8d77182009-04-29 18:03:45 -04007787static ssize_t
7788tracing_stats_read(struct file *filp, char __user *ubuf,
7789 size_t count, loff_t *ppos)
7790{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007791 struct inode *inode = file_inode(filp);
7792 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007793 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007794 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007795 struct trace_seq *s;
7796 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007797 unsigned long long t;
7798 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007799
Li Zefane4f2d102009-06-15 10:57:28 +08007800 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007801 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007802 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007803
7804 trace_seq_init(s);
7805
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007806 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007807 trace_seq_printf(s, "entries: %ld\n", cnt);
7808
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007809 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007810 trace_seq_printf(s, "overrun: %ld\n", cnt);
7811
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007812 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007813 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7814
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007815 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007816 trace_seq_printf(s, "bytes: %ld\n", cnt);
7817
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007818 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007819 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007820 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007821 usec_rem = do_div(t, USEC_PER_SEC);
7822 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7823 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007824
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007825 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007826 usec_rem = do_div(t, USEC_PER_SEC);
7827 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7828 } else {
7829 /* counter or tsc mode for trace_clock */
7830 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007831 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007832
7833 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007834 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007835 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007836
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007837 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007838 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7839
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007840 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007841 trace_seq_printf(s, "read events: %ld\n", cnt);
7842
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007843 count = simple_read_from_buffer(ubuf, count, ppos,
7844 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007845
7846 kfree(s);
7847
7848 return count;
7849}
7850
7851static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007852 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007853 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007854 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007855 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007856};
7857
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007858#ifdef CONFIG_DYNAMIC_FTRACE
7859
7860static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007861tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007862 size_t cnt, loff_t *ppos)
7863{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007864 ssize_t ret;
7865 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007866 int r;
7867
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007868 /* 256 should be plenty to hold the amount needed */
7869 buf = kmalloc(256, GFP_KERNEL);
7870 if (!buf)
7871 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007872
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007873 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7874 ftrace_update_tot_cnt,
7875 ftrace_number_of_pages,
7876 ftrace_number_of_groups);
7877
7878 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7879 kfree(buf);
7880 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007881}
7882
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007883static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007884 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007885 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007886 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007887};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007888#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007889
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007890#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7891static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007892ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007893 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007894 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007895{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007896 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007897}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007898
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007899static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007900ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007901 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007902 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007903{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007904 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007905 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007906
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007907 if (mapper)
7908 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007909
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007910 if (count) {
7911
7912 if (*count <= 0)
7913 return;
7914
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007915 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007916 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007917
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007918 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007919}
7920
7921static int
7922ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7923 struct ftrace_probe_ops *ops, void *data)
7924{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007925 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007926 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007927
7928 seq_printf(m, "%ps:", (void *)ip);
7929
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007930 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007931
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007932 if (mapper)
7933 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7934
7935 if (count)
7936 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007937 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007938 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007939
7940 return 0;
7941}
7942
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007943static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007944ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007945 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007946{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007947 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007948
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007949 if (!mapper) {
7950 mapper = allocate_ftrace_func_mapper();
7951 if (!mapper)
7952 return -ENOMEM;
7953 *data = mapper;
7954 }
7955
7956 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007957}
7958
7959static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007960ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007961 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007962{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007963 struct ftrace_func_mapper *mapper = data;
7964
7965 if (!ip) {
7966 if (!mapper)
7967 return;
7968 free_ftrace_func_mapper(mapper, NULL);
7969 return;
7970 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007971
7972 ftrace_func_mapper_remove_ip(mapper, ip);
7973}
7974
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007975static struct ftrace_probe_ops snapshot_probe_ops = {
7976 .func = ftrace_snapshot,
7977 .print = ftrace_snapshot_print,
7978};
7979
7980static struct ftrace_probe_ops snapshot_count_probe_ops = {
7981 .func = ftrace_count_snapshot,
7982 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007983 .init = ftrace_snapshot_init,
7984 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007985};
7986
7987static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007988ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007989 char *glob, char *cmd, char *param, int enable)
7990{
7991 struct ftrace_probe_ops *ops;
7992 void *count = (void *)-1;
7993 char *number;
7994 int ret;
7995
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007996 if (!tr)
7997 return -ENODEV;
7998
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007999 /* hash funcs only work with set_ftrace_filter */
8000 if (!enable)
8001 return -EINVAL;
8002
8003 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8004
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04008005 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04008006 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008007
8008 if (!param)
8009 goto out_reg;
8010
8011 number = strsep(&param, ":");
8012
8013 if (!strlen(number))
8014 goto out_reg;
8015
8016 /*
8017 * We use the callback data field (which is a pointer)
8018 * as our counter.
8019 */
8020 ret = kstrtoul(number, 0, (unsigned long *)&count);
8021 if (ret)
8022 return ret;
8023
8024 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008025 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008026 if (ret < 0)
8027 goto out;
8028
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008029 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008030
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008031 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008032 return ret < 0 ? ret : 0;
8033}
8034
8035static struct ftrace_func_command ftrace_snapshot_cmd = {
8036 .name = "snapshot",
8037 .func = ftrace_trace_snapshot_callback,
8038};
8039
Tom Zanussi38de93a2013-10-24 08:34:18 -05008040static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008041{
8042 return register_ftrace_command(&ftrace_snapshot_cmd);
8043}
8044#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008045static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008046#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008047
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008048static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008049{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008050 if (WARN_ON(!tr->dir))
8051 return ERR_PTR(-ENODEV);
8052
8053 /* Top directory uses NULL as the parent */
8054 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8055 return NULL;
8056
8057 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008058 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008059}
8060
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008061static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8062{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008063 struct dentry *d_tracer;
8064
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008065 if (tr->percpu_dir)
8066 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008067
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008068 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008069 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008070 return NULL;
8071
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008072 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008073
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008074 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008075 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008076
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008077 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008078}
8079
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008080static struct dentry *
8081trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8082 void *data, long cpu, const struct file_operations *fops)
8083{
8084 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8085
8086 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008087 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008088 return ret;
8089}
8090
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008091static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008092tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008093{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008094 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008095 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008096 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008097
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008098 if (!d_percpu)
8099 return;
8100
Steven Rostedtdd49a382010-10-20 21:51:26 -04008101 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008102 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008103 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008104 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008105 return;
8106 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008107
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008108 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008109 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008110 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008111
8112 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008113 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008114 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008115
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008116 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008117 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008118
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008119 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008120 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008121
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008122 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008123 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008124
8125#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008126 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008127 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008128
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008129 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008130 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008131#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008132}
8133
Steven Rostedt60a11772008-05-12 21:20:44 +02008134#ifdef CONFIG_FTRACE_SELFTEST
8135/* Let selftest have access to static functions in this file */
8136#include "trace_selftest.c"
8137#endif
8138
Steven Rostedt577b7852009-02-26 23:43:05 -05008139static ssize_t
8140trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8141 loff_t *ppos)
8142{
8143 struct trace_option_dentry *topt = filp->private_data;
8144 char *buf;
8145
8146 if (topt->flags->val & topt->opt->bit)
8147 buf = "1\n";
8148 else
8149 buf = "0\n";
8150
8151 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8152}
8153
8154static ssize_t
8155trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8156 loff_t *ppos)
8157{
8158 struct trace_option_dentry *topt = filp->private_data;
8159 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008160 int ret;
8161
Peter Huewe22fe9b52011-06-07 21:58:27 +02008162 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8163 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008164 return ret;
8165
Li Zefan8d18eaa2009-12-08 11:17:06 +08008166 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008167 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008168
8169 if (!!(topt->flags->val & topt->opt->bit) != val) {
8170 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008171 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008172 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008173 mutex_unlock(&trace_types_lock);
8174 if (ret)
8175 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008176 }
8177
8178 *ppos += cnt;
8179
8180 return cnt;
8181}
8182
8183
8184static const struct file_operations trace_options_fops = {
8185 .open = tracing_open_generic,
8186 .read = trace_options_read,
8187 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008188 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008189};
8190
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008191/*
8192 * In order to pass in both the trace_array descriptor as well as the index
8193 * to the flag that the trace option file represents, the trace_array
8194 * has a character array of trace_flags_index[], which holds the index
8195 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8196 * The address of this character array is passed to the flag option file
8197 * read/write callbacks.
8198 *
8199 * In order to extract both the index and the trace_array descriptor,
8200 * get_tr_index() uses the following algorithm.
8201 *
8202 * idx = *ptr;
8203 *
8204 * As the pointer itself contains the address of the index (remember
8205 * index[1] == 1).
8206 *
8207 * Then to get the trace_array descriptor, by subtracting that index
8208 * from the ptr, we get to the start of the index itself.
8209 *
8210 * ptr - idx == &index[0]
8211 *
8212 * Then a simple container_of() from that pointer gets us to the
8213 * trace_array descriptor.
8214 */
8215static void get_tr_index(void *data, struct trace_array **ptr,
8216 unsigned int *pindex)
8217{
8218 *pindex = *(unsigned char *)data;
8219
8220 *ptr = container_of(data - *pindex, struct trace_array,
8221 trace_flags_index);
8222}
8223
Steven Rostedta8259072009-02-26 22:19:12 -05008224static ssize_t
8225trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8226 loff_t *ppos)
8227{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008228 void *tr_index = filp->private_data;
8229 struct trace_array *tr;
8230 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008231 char *buf;
8232
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008233 get_tr_index(tr_index, &tr, &index);
8234
8235 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008236 buf = "1\n";
8237 else
8238 buf = "0\n";
8239
8240 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8241}
8242
8243static ssize_t
8244trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8245 loff_t *ppos)
8246{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008247 void *tr_index = filp->private_data;
8248 struct trace_array *tr;
8249 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008250 unsigned long val;
8251 int ret;
8252
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008253 get_tr_index(tr_index, &tr, &index);
8254
Peter Huewe22fe9b52011-06-07 21:58:27 +02008255 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8256 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008257 return ret;
8258
Zhaoleif2d84b62009-08-07 18:55:48 +08008259 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008260 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008261
Prateek Sood3a53acf2019-12-10 09:15:16 +00008262 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008263 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008264 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008265 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008266 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008267
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008268 if (ret < 0)
8269 return ret;
8270
Steven Rostedta8259072009-02-26 22:19:12 -05008271 *ppos += cnt;
8272
8273 return cnt;
8274}
8275
Steven Rostedta8259072009-02-26 22:19:12 -05008276static const struct file_operations trace_options_core_fops = {
8277 .open = tracing_open_generic,
8278 .read = trace_options_core_read,
8279 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008280 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008281};
8282
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008283struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008284 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008285 struct dentry *parent,
8286 void *data,
8287 const struct file_operations *fops)
8288{
8289 struct dentry *ret;
8290
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008291 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008292 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008293 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008294
8295 return ret;
8296}
8297
8298
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008299static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008300{
8301 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008302
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008303 if (tr->options)
8304 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008305
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008306 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008307 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008308 return NULL;
8309
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008310 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008311 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008312 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008313 return NULL;
8314 }
8315
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008316 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008317}
8318
Steven Rostedt577b7852009-02-26 23:43:05 -05008319static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008320create_trace_option_file(struct trace_array *tr,
8321 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008322 struct tracer_flags *flags,
8323 struct tracer_opt *opt)
8324{
8325 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008326
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008327 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008328 if (!t_options)
8329 return;
8330
8331 topt->flags = flags;
8332 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008333 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008334
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008335 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008336 &trace_options_fops);
8337
Steven Rostedt577b7852009-02-26 23:43:05 -05008338}
8339
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008340static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008341create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008342{
8343 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008344 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008345 struct tracer_flags *flags;
8346 struct tracer_opt *opts;
8347 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008348 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008349
8350 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008351 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008352
8353 flags = tracer->flags;
8354
8355 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008356 return;
8357
8358 /*
8359 * If this is an instance, only create flags for tracers
8360 * the instance may have.
8361 */
8362 if (!trace_ok_for_array(tracer, tr))
8363 return;
8364
8365 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008366 /* Make sure there's no duplicate flags. */
8367 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008368 return;
8369 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008370
8371 opts = flags->opts;
8372
8373 for (cnt = 0; opts[cnt].name; cnt++)
8374 ;
8375
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008376 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008377 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008378 return;
8379
8380 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8381 GFP_KERNEL);
8382 if (!tr_topts) {
8383 kfree(topts);
8384 return;
8385 }
8386
8387 tr->topts = tr_topts;
8388 tr->topts[tr->nr_topts].tracer = tracer;
8389 tr->topts[tr->nr_topts].topts = topts;
8390 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008391
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008392 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008393 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008394 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008395 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008396 "Failed to create trace option: %s",
8397 opts[cnt].name);
8398 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008399}
8400
Steven Rostedta8259072009-02-26 22:19:12 -05008401static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008402create_trace_option_core_file(struct trace_array *tr,
8403 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008404{
8405 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008406
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008407 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008408 if (!t_options)
8409 return NULL;
8410
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008411 return trace_create_file(option, 0644, t_options,
8412 (void *)&tr->trace_flags_index[index],
8413 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008414}
8415
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008416static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008417{
8418 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008419 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008420 int i;
8421
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008422 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008423 if (!t_options)
8424 return;
8425
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008426 for (i = 0; trace_options[i]; i++) {
8427 if (top_level ||
8428 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8429 create_trace_option_core_file(tr, trace_options[i], i);
8430 }
Steven Rostedta8259072009-02-26 22:19:12 -05008431}
8432
Steven Rostedt499e5472012-02-22 15:50:28 -05008433static ssize_t
8434rb_simple_read(struct file *filp, char __user *ubuf,
8435 size_t cnt, loff_t *ppos)
8436{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008437 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008438 char buf[64];
8439 int r;
8440
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008441 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008442 r = sprintf(buf, "%d\n", r);
8443
8444 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8445}
8446
8447static ssize_t
8448rb_simple_write(struct file *filp, const char __user *ubuf,
8449 size_t cnt, loff_t *ppos)
8450{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008451 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008452 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008453 unsigned long val;
8454 int ret;
8455
8456 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8457 if (ret)
8458 return ret;
8459
8460 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008461 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008462 if (!!val == tracer_tracing_is_on(tr)) {
8463 val = 0; /* do nothing */
8464 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008465 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008466 if (tr->current_trace->start)
8467 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008468 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008469 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008470 if (tr->current_trace->stop)
8471 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008472 }
8473 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008474 }
8475
8476 (*ppos)++;
8477
8478 return cnt;
8479}
8480
8481static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008482 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008483 .read = rb_simple_read,
8484 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008485 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008486 .llseek = default_llseek,
8487};
8488
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008489static ssize_t
8490buffer_percent_read(struct file *filp, char __user *ubuf,
8491 size_t cnt, loff_t *ppos)
8492{
8493 struct trace_array *tr = filp->private_data;
8494 char buf[64];
8495 int r;
8496
8497 r = tr->buffer_percent;
8498 r = sprintf(buf, "%d\n", r);
8499
8500 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8501}
8502
8503static ssize_t
8504buffer_percent_write(struct file *filp, const char __user *ubuf,
8505 size_t cnt, loff_t *ppos)
8506{
8507 struct trace_array *tr = filp->private_data;
8508 unsigned long val;
8509 int ret;
8510
8511 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8512 if (ret)
8513 return ret;
8514
8515 if (val > 100)
8516 return -EINVAL;
8517
8518 if (!val)
8519 val = 1;
8520
8521 tr->buffer_percent = val;
8522
8523 (*ppos)++;
8524
8525 return cnt;
8526}
8527
8528static const struct file_operations buffer_percent_fops = {
8529 .open = tracing_open_generic_tr,
8530 .read = buffer_percent_read,
8531 .write = buffer_percent_write,
8532 .release = tracing_release_generic_tr,
8533 .llseek = default_llseek,
8534};
8535
YueHaibingff585c52019-06-14 23:32:10 +08008536static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008537
8538static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008539init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008540
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008541static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008542allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008543{
8544 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008545
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008546 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008547
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008548 buf->tr = tr;
8549
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008550 buf->buffer = ring_buffer_alloc(size, rb_flags);
8551 if (!buf->buffer)
8552 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008553
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008554 buf->data = alloc_percpu(struct trace_array_cpu);
8555 if (!buf->data) {
8556 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008557 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008558 return -ENOMEM;
8559 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008560
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008561 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008562 set_buffer_entries(&tr->array_buffer,
8563 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008564
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008565 return 0;
8566}
8567
8568static int allocate_trace_buffers(struct trace_array *tr, int size)
8569{
8570 int ret;
8571
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008572 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008573 if (ret)
8574 return ret;
8575
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008576#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008577 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8578 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008579 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008580 ring_buffer_free(tr->array_buffer.buffer);
8581 tr->array_buffer.buffer = NULL;
8582 free_percpu(tr->array_buffer.data);
8583 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008584 return -ENOMEM;
8585 }
8586 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008587
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008588 /*
8589 * Only the top level trace array gets its snapshot allocated
8590 * from the kernel command line.
8591 */
8592 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008593#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04008594
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008595 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008596}
8597
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008598static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008599{
8600 if (buf->buffer) {
8601 ring_buffer_free(buf->buffer);
8602 buf->buffer = NULL;
8603 free_percpu(buf->data);
8604 buf->data = NULL;
8605 }
8606}
8607
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008608static void free_trace_buffers(struct trace_array *tr)
8609{
8610 if (!tr)
8611 return;
8612
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008613 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008614
8615#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008616 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008617#endif
8618}
8619
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008620static void init_trace_flags_index(struct trace_array *tr)
8621{
8622 int i;
8623
8624 /* Used by the trace options files */
8625 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8626 tr->trace_flags_index[i] = i;
8627}
8628
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008629static void __update_tracer_options(struct trace_array *tr)
8630{
8631 struct tracer *t;
8632
8633 for (t = trace_types; t; t = t->next)
8634 add_tracer_options(tr, t);
8635}
8636
8637static void update_tracer_options(struct trace_array *tr)
8638{
8639 mutex_lock(&trace_types_lock);
8640 __update_tracer_options(tr);
8641 mutex_unlock(&trace_types_lock);
8642}
8643
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008644/* Must have trace_types_lock held */
8645struct trace_array *trace_array_find(const char *instance)
8646{
8647 struct trace_array *tr, *found = NULL;
8648
8649 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8650 if (tr->name && strcmp(tr->name, instance) == 0) {
8651 found = tr;
8652 break;
8653 }
8654 }
8655
8656 return found;
8657}
8658
8659struct trace_array *trace_array_find_get(const char *instance)
8660{
8661 struct trace_array *tr;
8662
8663 mutex_lock(&trace_types_lock);
8664 tr = trace_array_find(instance);
8665 if (tr)
8666 tr->ref++;
8667 mutex_unlock(&trace_types_lock);
8668
8669 return tr;
8670}
8671
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008672static int trace_array_create_dir(struct trace_array *tr)
8673{
8674 int ret;
8675
8676 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8677 if (!tr->dir)
8678 return -EINVAL;
8679
8680 ret = event_trace_add_tracer(tr->dir, tr);
8681 if (ret)
8682 tracefs_remove(tr->dir);
8683
8684 init_tracer_tracefs(tr, tr->dir);
8685 __update_tracer_options(tr);
8686
8687 return ret;
8688}
8689
Divya Indi28879782019-11-20 11:08:38 -08008690static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008691{
Steven Rostedt277ba042012-08-03 16:10:49 -04008692 struct trace_array *tr;
8693 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008694
Steven Rostedt277ba042012-08-03 16:10:49 -04008695 ret = -ENOMEM;
8696 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8697 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008698 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008699
8700 tr->name = kstrdup(name, GFP_KERNEL);
8701 if (!tr->name)
8702 goto out_free_tr;
8703
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008704 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8705 goto out_free_tr;
8706
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008707 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008708
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008709 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8710
Steven Rostedt277ba042012-08-03 16:10:49 -04008711 raw_spin_lock_init(&tr->start_lock);
8712
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008713 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8714
Steven Rostedt277ba042012-08-03 16:10:49 -04008715 tr->current_trace = &nop_trace;
8716
8717 INIT_LIST_HEAD(&tr->systems);
8718 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008719 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008720 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008721
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008722 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008723 goto out_free_tr;
8724
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008725 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008726 goto out_free_tr;
8727
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008728 ftrace_init_trace_array(tr);
8729
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008730 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008731
8732 if (trace_instance_dir) {
8733 ret = trace_array_create_dir(tr);
8734 if (ret)
8735 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09008736 } else
8737 __trace_early_add_events(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008738
8739 list_add(&tr->list, &ftrace_trace_arrays);
8740
Divya Indi28879782019-11-20 11:08:38 -08008741 tr->ref++;
8742
Divya Indif45d1222019-03-20 11:28:51 -07008743 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008744
8745 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008746 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008747 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008748 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008749 kfree(tr->name);
8750 kfree(tr);
8751
Divya Indif45d1222019-03-20 11:28:51 -07008752 return ERR_PTR(ret);
8753}
Steven Rostedt277ba042012-08-03 16:10:49 -04008754
Divya Indif45d1222019-03-20 11:28:51 -07008755static int instance_mkdir(const char *name)
8756{
Divya Indi28879782019-11-20 11:08:38 -08008757 struct trace_array *tr;
8758 int ret;
8759
8760 mutex_lock(&event_mutex);
8761 mutex_lock(&trace_types_lock);
8762
8763 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008764 if (trace_array_find(name))
8765 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08008766
8767 tr = trace_array_create(name);
8768
8769 ret = PTR_ERR_OR_ZERO(tr);
8770
8771out_unlock:
8772 mutex_unlock(&trace_types_lock);
8773 mutex_unlock(&event_mutex);
8774 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008775}
8776
Divya Indi28879782019-11-20 11:08:38 -08008777/**
8778 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8779 * @name: The name of the trace array to be looked up/created.
8780 *
8781 * Returns pointer to trace array with given name.
8782 * NULL, if it cannot be created.
8783 *
8784 * NOTE: This function increments the reference counter associated with the
8785 * trace array returned. This makes sure it cannot be freed while in use.
8786 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05008787 * If the trace_array is to be freed, trace_array_destroy() needs to
8788 * be called after the trace_array_put(), or simply let user space delete
8789 * it from the tracefs instances directory. But until the
8790 * trace_array_put() is called, user space can not delete it.
Divya Indi28879782019-11-20 11:08:38 -08008791 *
8792 */
8793struct trace_array *trace_array_get_by_name(const char *name)
8794{
8795 struct trace_array *tr;
8796
8797 mutex_lock(&event_mutex);
8798 mutex_lock(&trace_types_lock);
8799
8800 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8801 if (tr->name && strcmp(tr->name, name) == 0)
8802 goto out_unlock;
8803 }
8804
8805 tr = trace_array_create(name);
8806
8807 if (IS_ERR(tr))
8808 tr = NULL;
8809out_unlock:
8810 if (tr)
8811 tr->ref++;
8812
8813 mutex_unlock(&trace_types_lock);
8814 mutex_unlock(&event_mutex);
8815 return tr;
8816}
8817EXPORT_SYMBOL_GPL(trace_array_get_by_name);
8818
Divya Indif45d1222019-03-20 11:28:51 -07008819static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008820{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008821 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008822
Divya Indi28879782019-11-20 11:08:38 -08008823 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008824 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07008825 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008826
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008827 list_del(&tr->list);
8828
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008829 /* Disable all the flags that were enabled coming in */
8830 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8831 if ((1 << i) & ZEROED_TRACE_FLAGS)
8832 set_tracer_flag(tr, 1 << i, 0);
8833 }
8834
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008835 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308836 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008837 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008838 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008839 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05008840 tracefs_remove(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008841 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008842
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008843 for (i = 0; i < tr->nr_topts; i++) {
8844 kfree(tr->topts[i].topts);
8845 }
8846 kfree(tr->topts);
8847
Chunyu Hudb9108e02017-07-20 18:36:09 +08008848 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008849 kfree(tr->name);
8850 kfree(tr);
8851
Divya Indif45d1222019-03-20 11:28:51 -07008852 return 0;
8853}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008854
Divya Indie585e642019-08-14 10:55:24 -07008855int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008856{
Divya Indie585e642019-08-14 10:55:24 -07008857 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008858 int ret;
8859
Divya Indie585e642019-08-14 10:55:24 -07008860 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008861 return -EINVAL;
8862
8863 mutex_lock(&event_mutex);
8864 mutex_lock(&trace_types_lock);
8865
Divya Indie585e642019-08-14 10:55:24 -07008866 ret = -ENODEV;
8867
8868 /* Making sure trace array exists before destroying it. */
8869 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8870 if (tr == this_tr) {
8871 ret = __remove_instance(tr);
8872 break;
8873 }
8874 }
Divya Indif45d1222019-03-20 11:28:51 -07008875
8876 mutex_unlock(&trace_types_lock);
8877 mutex_unlock(&event_mutex);
8878
8879 return ret;
8880}
8881EXPORT_SYMBOL_GPL(trace_array_destroy);
8882
8883static int instance_rmdir(const char *name)
8884{
8885 struct trace_array *tr;
8886 int ret;
8887
8888 mutex_lock(&event_mutex);
8889 mutex_lock(&trace_types_lock);
8890
8891 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008892 tr = trace_array_find(name);
8893 if (tr)
8894 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008895
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008896 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008897 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008898
8899 return ret;
8900}
8901
Steven Rostedt277ba042012-08-03 16:10:49 -04008902static __init void create_trace_instances(struct dentry *d_tracer)
8903{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008904 struct trace_array *tr;
8905
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008906 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8907 instance_mkdir,
8908 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008909 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04008910 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008911
8912 mutex_lock(&event_mutex);
8913 mutex_lock(&trace_types_lock);
8914
8915 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8916 if (!tr->name)
8917 continue;
8918 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8919 "Failed to create instance directory\n"))
8920 break;
8921 }
8922
8923 mutex_unlock(&trace_types_lock);
8924 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008925}
8926
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008927static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008928init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008929{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008930 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008931 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008932
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008933 trace_create_file("available_tracers", 0444, d_tracer,
8934 tr, &show_traces_fops);
8935
8936 trace_create_file("current_tracer", 0644, d_tracer,
8937 tr, &set_tracer_fops);
8938
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008939 trace_create_file("tracing_cpumask", 0644, d_tracer,
8940 tr, &tracing_cpumask_fops);
8941
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008942 trace_create_file("trace_options", 0644, d_tracer,
8943 tr, &tracing_iter_fops);
8944
8945 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008946 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008947
8948 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008949 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008950
8951 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008952 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008953
8954 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8955 tr, &tracing_total_entries_fops);
8956
Wang YanQing238ae932013-05-26 16:52:01 +08008957 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008958 tr, &tracing_free_buffer_fops);
8959
8960 trace_create_file("trace_marker", 0220, d_tracer,
8961 tr, &tracing_mark_fops);
8962
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008963 file = __find_event_file(tr, "ftrace", "print");
8964 if (file && file->dir)
8965 trace_create_file("trigger", 0644, file->dir, file,
8966 &event_trigger_fops);
8967 tr->trace_marker_file = file;
8968
Steven Rostedtfa32e852016-07-06 15:25:08 -04008969 trace_create_file("trace_marker_raw", 0220, d_tracer,
8970 tr, &tracing_mark_raw_fops);
8971
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008972 trace_create_file("trace_clock", 0644, d_tracer, tr,
8973 &trace_clock_fops);
8974
8975 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008976 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008977
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008978 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8979 &trace_time_stamp_mode_fops);
8980
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008981 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008982
8983 trace_create_file("buffer_percent", 0444, d_tracer,
8984 tr, &buffer_percent_fops);
8985
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008986 create_trace_options_dir(tr);
8987
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008988#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008989 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008990#endif
8991
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008992 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008993 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008994
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008995#ifdef CONFIG_TRACER_SNAPSHOT
8996 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008997 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008998#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008999
Tom Zanussi8a062902019-03-31 18:48:15 -05009000 trace_create_file("error_log", 0644, d_tracer,
9001 tr, &tracing_err_log_fops);
9002
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009003 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009004 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009005
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04009006 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009007}
9008
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009009static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009010{
9011 struct vfsmount *mnt;
9012 struct file_system_type *type;
9013
9014 /*
9015 * To maintain backward compatibility for tools that mount
9016 * debugfs to get to the tracing facility, tracefs is automatically
9017 * mounted to the debugfs/tracing directory.
9018 */
9019 type = get_fs_type("tracefs");
9020 if (!type)
9021 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009022 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009023 put_filesystem(type);
9024 if (IS_ERR(mnt))
9025 return NULL;
9026 mntget(mnt);
9027
9028 return mnt;
9029}
9030
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009031/**
9032 * tracing_init_dentry - initialize top level trace array
9033 *
9034 * This is called when creating files or directories in the tracing
9035 * directory. It is called via fs_initcall() by any of the boot up code
9036 * and expects to return the dentry of the top level tracing directory.
9037 */
Wei Yang22c36b12020-07-12 09:10:36 +08009038int tracing_init_dentry(void)
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009039{
9040 struct trace_array *tr = &global_trace;
9041
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009042 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009043 pr_warn("Tracing disabled due to lockdown\n");
Wei Yang22c36b12020-07-12 09:10:36 +08009044 return -EPERM;
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009045 }
9046
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009047 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009048 if (tr->dir)
Wei Yang22c36b12020-07-12 09:10:36 +08009049 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009050
Peter Enderborg072e1332020-07-16 09:15:10 +02009051 if (WARN_ON(!tracefs_initialized()))
Wei Yang22c36b12020-07-12 09:10:36 +08009052 return -ENODEV;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009053
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009054 /*
9055 * As there may still be users that expect the tracing
9056 * files to exist in debugfs/tracing, we must automount
9057 * the tracefs file system there, so older tools still
9058 * work with the newer kerenl.
9059 */
9060 tr->dir = debugfs_create_automount("tracing", NULL,
9061 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009062
Wei Yang22c36b12020-07-12 09:10:36 +08009063 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009064}
9065
Jeremy Linton00f4b652017-05-31 16:56:43 -05009066extern struct trace_eval_map *__start_ftrace_eval_maps[];
9067extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009068
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009069static struct workqueue_struct *eval_map_wq __initdata;
9070static struct work_struct eval_map_work __initdata;
9071
9072static void __init eval_map_work_func(struct work_struct *work)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009073{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009074 int len;
9075
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009076 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009077 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009078}
9079
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009080static int __init trace_eval_init(void)
9081{
9082 INIT_WORK(&eval_map_work, eval_map_work_func);
9083
9084 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9085 if (!eval_map_wq) {
9086 pr_err("Unable to allocate eval_map_wq\n");
9087 /* Do work here */
9088 eval_map_work_func(&eval_map_work);
9089 return -ENOMEM;
9090 }
9091
9092 queue_work(eval_map_wq, &eval_map_work);
9093 return 0;
9094}
9095
9096static int __init trace_eval_sync(void)
9097{
9098 /* Make sure the eval map updates are finished */
9099 if (eval_map_wq)
9100 destroy_workqueue(eval_map_wq);
9101 return 0;
9102}
9103
9104late_initcall_sync(trace_eval_sync);
9105
9106
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009107#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009108static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009109{
Jeremy Linton99be6472017-05-31 16:56:44 -05009110 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009111 return;
9112
9113 /*
9114 * Modules with bad taint do not have events created, do
9115 * not bother with enums either.
9116 */
9117 if (trace_module_has_bad_taint(mod))
9118 return;
9119
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009120 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009121}
9122
Jeremy Linton681bec02017-05-31 16:56:53 -05009123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009124static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009125{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009126 union trace_eval_map_item *map;
9127 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009128
Jeremy Linton99be6472017-05-31 16:56:44 -05009129 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009130 return;
9131
Jeremy Linton1793ed92017-05-31 16:56:46 -05009132 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009133
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009134 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009135
9136 while (map) {
9137 if (map->head.mod == mod)
9138 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009139 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009140 last = &map->tail.next;
9141 map = map->tail.next;
9142 }
9143 if (!map)
9144 goto out;
9145
Jeremy Linton5f60b352017-05-31 16:56:47 -05009146 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009147 kfree(map);
9148 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009149 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009150}
9151#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009152static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009153#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009154
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009155static int trace_module_notify(struct notifier_block *self,
9156 unsigned long val, void *data)
9157{
9158 struct module *mod = data;
9159
9160 switch (val) {
9161 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009162 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009163 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009164 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009165 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009166 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009167 }
9168
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02009169 return NOTIFY_OK;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009170}
9171
9172static struct notifier_block trace_module_nb = {
9173 .notifier_call = trace_module_notify,
9174 .priority = 0,
9175};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009176#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009177
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009178static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009179{
Wei Yang22c36b12020-07-12 09:10:36 +08009180 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009181
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009182 trace_access_lock_init();
9183
Wei Yang22c36b12020-07-12 09:10:36 +08009184 ret = tracing_init_dentry();
9185 if (ret)
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009186 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009187
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009188 event_trace_init();
9189
Wei Yang22c36b12020-07-12 09:10:36 +08009190 init_tracer_tracefs(&global_trace, NULL);
9191 ftrace_init_tracefs_toplevel(&global_trace, NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009192
Wei Yang22c36b12020-07-12 09:10:36 +08009193 trace_create_file("tracing_thresh", 0644, NULL,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009194 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009195
Wei Yang22c36b12020-07-12 09:10:36 +08009196 trace_create_file("README", 0444, NULL,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009197 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009198
Wei Yang22c36b12020-07-12 09:10:36 +08009199 trace_create_file("saved_cmdlines", 0444, NULL,
Avadh Patel69abe6a2009-04-10 16:04:48 -04009200 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009201
Wei Yang22c36b12020-07-12 09:10:36 +08009202 trace_create_file("saved_cmdlines_size", 0644, NULL,
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009203 NULL, &tracing_saved_cmdlines_size_fops);
9204
Wei Yang22c36b12020-07-12 09:10:36 +08009205 trace_create_file("saved_tgids", 0444, NULL,
Michael Sartain99c621d2017-07-05 22:07:15 -06009206 NULL, &tracing_saved_tgids_fops);
9207
Jeremy Linton5f60b352017-05-31 16:56:47 -05009208 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009209
Wei Yang22c36b12020-07-12 09:10:36 +08009210 trace_create_eval_file(NULL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009211
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009212#ifdef CONFIG_MODULES
9213 register_module_notifier(&trace_module_nb);
9214#endif
9215
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009216#ifdef CONFIG_DYNAMIC_FTRACE
Wei Yang22c36b12020-07-12 09:10:36 +08009217 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009218 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009219#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009220
Wei Yang22c36b12020-07-12 09:10:36 +08009221 create_trace_instances(NULL);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009222
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009223 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009224
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009225 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009226}
9227
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009228static int trace_panic_handler(struct notifier_block *this,
9229 unsigned long event, void *unused)
9230{
Steven Rostedt944ac422008-10-23 19:26:08 -04009231 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009232 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009233 return NOTIFY_OK;
9234}
9235
9236static struct notifier_block trace_panic_notifier = {
9237 .notifier_call = trace_panic_handler,
9238 .next = NULL,
9239 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9240};
9241
9242static int trace_die_handler(struct notifier_block *self,
9243 unsigned long val,
9244 void *data)
9245{
9246 switch (val) {
9247 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009248 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009249 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009250 break;
9251 default:
9252 break;
9253 }
9254 return NOTIFY_OK;
9255}
9256
9257static struct notifier_block trace_die_notifier = {
9258 .notifier_call = trace_die_handler,
9259 .priority = 200
9260};
9261
9262/*
9263 * printk is set to max of 1024, we really don't need it that big.
9264 * Nothing should be printing 1000 characters anyway.
9265 */
9266#define TRACE_MAX_PRINT 1000
9267
9268/*
9269 * Define here KERN_TRACE so that we have one place to modify
9270 * it if we decide to change what log level the ftrace dump
9271 * should be at.
9272 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009273#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009274
Jason Wessel955b61e2010-08-05 09:22:23 -05009275void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009276trace_printk_seq(struct trace_seq *s)
9277{
9278 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009279 if (s->seq.len >= TRACE_MAX_PRINT)
9280 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009281
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009282 /*
9283 * More paranoid code. Although the buffer size is set to
9284 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9285 * an extra layer of protection.
9286 */
9287 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9288 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009289
9290 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009291 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009292
9293 printk(KERN_TRACE "%s", s->buffer);
9294
Steven Rostedtf9520752009-03-02 14:04:40 -05009295 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009296}
9297
Jason Wessel955b61e2010-08-05 09:22:23 -05009298void trace_init_global_iter(struct trace_iterator *iter)
9299{
9300 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009301 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009302 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009303 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009304
9305 if (iter->trace && iter->trace->open)
9306 iter->trace->open(iter);
9307
9308 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009309 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009310 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9311
9312 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9313 if (trace_clocks[iter->tr->clock_id].in_ns)
9314 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009315}
9316
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009317void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009318{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009319 /* use static because iter can be a bit big for the stack */
9320 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009321 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009322 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009323 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009324 unsigned long flags;
9325 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009326
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009327 /* Only allow one dump user at a time. */
9328 if (atomic_inc_return(&dump_running) != 1) {
9329 atomic_dec(&dump_running);
9330 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009331 }
9332
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009333 /*
9334 * Always turn off tracing when we dump.
9335 * We don't need to show trace output of what happens
9336 * between multiple crashes.
9337 *
9338 * If the user does a sysrq-z, then they can re-enable
9339 * tracing with echo 1 > tracing_on.
9340 */
9341 tracing_off();
9342
9343 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009344 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009345
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009346 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009347 trace_init_global_iter(&iter);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009348 /* Can not use kmalloc for iter.temp */
9349 iter.temp = static_temp_buf;
9350 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009351
Steven Rostedtd7690412008-10-01 00:29:53 -04009352 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009353 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009354 }
9355
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009356 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009357
Török Edwinb54d3de2008-11-22 13:28:48 +02009358 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009359 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009360
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009361 switch (oops_dump_mode) {
9362 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009363 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009364 break;
9365 case DUMP_ORIG:
9366 iter.cpu_file = raw_smp_processor_id();
9367 break;
9368 case DUMP_NONE:
9369 goto out_enable;
9370 default:
9371 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009372 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009373 }
9374
9375 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009376
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009377 /* Did function tracer already get disabled? */
9378 if (ftrace_is_dead()) {
9379 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9380 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9381 }
9382
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009383 /*
Randy Dunlap5c8c2062020-08-06 20:32:59 -07009384 * We need to stop all tracing on all CPUS to read
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009385 * the next buffer. This is a bit expensive, but is
9386 * not done often. We fill all what we can read,
9387 * and then release the locks again.
9388 */
9389
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009390 while (!trace_empty(&iter)) {
9391
9392 if (!cnt)
9393 printk(KERN_TRACE "---------------------------------\n");
9394
9395 cnt++;
9396
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009397 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009398 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009399
Jason Wessel955b61e2010-08-05 09:22:23 -05009400 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009401 int ret;
9402
9403 ret = print_trace_line(&iter);
9404 if (ret != TRACE_TYPE_NO_CONSUME)
9405 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009406 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009407 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009408
9409 trace_printk_seq(&iter.seq);
9410 }
9411
9412 if (!cnt)
9413 printk(KERN_TRACE " (ftrace buffer empty)\n");
9414 else
9415 printk(KERN_TRACE "---------------------------------\n");
9416
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009417 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009418 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009419
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009420 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009421 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009422 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009423 atomic_dec(&dump_running);
9424 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009425 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009426}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009427EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009428
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009429int trace_run_command(const char *buf, int (*createfn)(int, char **))
9430{
9431 char **argv;
9432 int argc, ret;
9433
9434 argc = 0;
9435 ret = 0;
9436 argv = argv_split(GFP_KERNEL, buf, &argc);
9437 if (!argv)
9438 return -ENOMEM;
9439
9440 if (argc)
9441 ret = createfn(argc, argv);
9442
9443 argv_free(argv);
9444
9445 return ret;
9446}
9447
9448#define WRITE_BUFSIZE 4096
9449
9450ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9451 size_t count, loff_t *ppos,
9452 int (*createfn)(int, char **))
9453{
9454 char *kbuf, *buf, *tmp;
9455 int ret = 0;
9456 size_t done = 0;
9457 size_t size;
9458
9459 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9460 if (!kbuf)
9461 return -ENOMEM;
9462
9463 while (done < count) {
9464 size = count - done;
9465
9466 if (size >= WRITE_BUFSIZE)
9467 size = WRITE_BUFSIZE - 1;
9468
9469 if (copy_from_user(kbuf, buffer + done, size)) {
9470 ret = -EFAULT;
9471 goto out;
9472 }
9473 kbuf[size] = '\0';
9474 buf = kbuf;
9475 do {
9476 tmp = strchr(buf, '\n');
9477 if (tmp) {
9478 *tmp = '\0';
9479 size = tmp - buf + 1;
9480 } else {
9481 size = strlen(buf);
9482 if (done + size < count) {
9483 if (buf != kbuf)
9484 break;
9485 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9486 pr_warn("Line length is too long: Should be less than %d\n",
9487 WRITE_BUFSIZE - 2);
9488 ret = -EINVAL;
9489 goto out;
9490 }
9491 }
9492 done += size;
9493
9494 /* Remove comments */
9495 tmp = strchr(buf, '#');
9496
9497 if (tmp)
9498 *tmp = '\0';
9499
9500 ret = trace_run_command(buf, createfn);
9501 if (ret)
9502 goto out;
9503 buf += size;
9504
9505 } while (done < count);
9506 }
9507 ret = done;
9508
9509out:
9510 kfree(kbuf);
9511
9512 return ret;
9513}
9514
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009515__init static int tracer_alloc_buffers(void)
9516{
Steven Rostedt73c51622009-03-11 13:42:01 -04009517 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309518 int ret = -ENOMEM;
9519
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009520
9521 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009522 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009523 return -EPERM;
9524 }
9525
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009526 /*
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009527 * Make sure we don't accidentally add more trace options
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009528 * than we have bits for.
9529 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009530 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009531
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309532 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9533 goto out;
9534
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009535 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309536 goto out_free_buffer_mask;
9537
Steven Rostedt07d777f2011-09-22 14:01:55 -04009538 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009539 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009540 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009541 trace_printk_init_buffers();
9542
Steven Rostedt73c51622009-03-11 13:42:01 -04009543 /* To save memory, keep the ring buffer size to its minimum */
9544 if (ring_buffer_expanded)
9545 ring_buf_size = trace_buf_size;
9546 else
9547 ring_buf_size = 1;
9548
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309549 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009550 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009551
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009552 raw_spin_lock_init(&global_trace.start_lock);
9553
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009554 /*
9555 * The prepare callbacks allocates some memory for the ring buffer. We
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009556 * don't free the buffer if the CPU goes down. If we were to free
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009557 * the buffer, then the user would lose any trace that was in the
9558 * buffer. The memory will be removed once the "instance" is removed.
9559 */
9560 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9561 "trace/RB:preapre", trace_rb_cpu_prepare,
9562 NULL);
9563 if (ret < 0)
9564 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009565 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009566 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009567 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9568 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009569 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009570
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009571 if (trace_create_savedcmd() < 0)
9572 goto out_free_temp_buffer;
9573
Steven Rostedtab464282008-05-12 21:21:00 +02009574 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009575 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009576 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009577 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009578 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009579
Steven Rostedt499e5472012-02-22 15:50:28 -05009580 if (global_trace.buffer_disabled)
9581 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009582
Steven Rostedte1e232c2014-02-10 23:38:46 -05009583 if (trace_boot_clock) {
9584 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9585 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009586 pr_warn("Trace clock %s not defined, going back to default\n",
9587 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009588 }
9589
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009590 /*
9591 * register_tracer() might reference current_trace, so it
9592 * needs to be set before we register anything. This is
9593 * just a bootstrap of current_trace anyway.
9594 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009595 global_trace.current_trace = &nop_trace;
9596
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009597 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9598
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009599 ftrace_init_global_array_ops(&global_trace);
9600
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009601 init_trace_flags_index(&global_trace);
9602
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009603 register_tracer(&nop_trace);
9604
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009605 /* Function tracing may start here (via kernel command line) */
9606 init_function_trace();
9607
Steven Rostedt60a11772008-05-12 21:20:44 +02009608 /* All seems OK, enable tracing */
9609 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009610
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009611 atomic_notifier_chain_register(&panic_notifier_list,
9612 &trace_panic_notifier);
9613
9614 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009615
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009616 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9617
9618 INIT_LIST_HEAD(&global_trace.systems);
9619 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009620 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009621 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009622 list_add(&global_trace.list, &ftrace_trace_arrays);
9623
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08009624 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04009625
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04009626 register_snapshot_cmd();
9627
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009628 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009629
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009630out_free_savedcmd:
9631 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009632out_free_temp_buffer:
9633 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009634out_rm_hp_state:
9635 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309636out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009637 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309638out_free_buffer_mask:
9639 free_cpumask_var(tracing_buffer_mask);
9640out:
9641 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009642}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009643
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009644void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009645{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009646 if (tracepoint_printk) {
9647 tracepoint_print_iter =
9648 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009649 if (MEM_FAIL(!tracepoint_print_iter,
9650 "Failed to allocate trace iterator\n"))
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009651 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05009652 else
9653 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009654 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009655 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009656}
9657
9658void __init trace_init(void)
9659{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009660 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009661}
9662
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009663__init static int clear_boot_tracer(void)
9664{
9665 /*
9666 * The default tracer at boot buffer is an init section.
9667 * This function is called in lateinit. If we did not
9668 * find the boot tracer, then clear it out, to prevent
9669 * later registration from accessing the buffer that is
9670 * about to be freed.
9671 */
9672 if (!default_bootup_tracer)
9673 return 0;
9674
9675 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9676 default_bootup_tracer);
9677 default_bootup_tracer = NULL;
9678
9679 return 0;
9680}
9681
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009682fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04009683late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01009684
9685#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9686__init static int tracing_set_default_clock(void)
9687{
9688 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01009689 if (!trace_boot_clock && !sched_clock_stable()) {
Masami Ichikawabf24daa2020-01-16 22:12:36 +09009690 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9691 pr_warn("Can not set tracing clock due to lockdown\n");
9692 return -EPERM;
9693 }
9694
Chris Wilson3fd49c92018-03-30 16:01:31 +01009695 printk(KERN_WARNING
9696 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9697 "If you want to keep using the local clock, then add:\n"
9698 " \"trace_clock=local\"\n"
9699 "on the kernel command line\n");
9700 tracing_set_clock(&global_trace, "global");
9701 }
9702
9703 return 0;
9704}
9705late_initcall_sync(tracing_set_default_clock);
9706#endif