// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as from trace_printk)
 * could occur at the same time, giving false positive or
 * negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
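
/*
 * Illustrative usage (documented kernel parameters): the __setup()
 * handlers above and below back boot-time options such as
 *
 *	ftrace=function
 *	ftrace_dump_on_oops
 *	ftrace_dump_on_oops=orig_cpu
 *
 * The first selects a tracer before user space starts; the other two
 * select DUMP_ALL and DUMP_ORIG respectively.
 */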

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
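
/*
 * Example: ns2usecs() rounds to the nearest microsecond, so
 * ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */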

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

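/*
 * Illustrative sketch (not part of this file): a minimal exporter that
 * mirrors function trace entries to some backend. The my_* names are
 * hypothetical.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		my_backend_write(entry, size);	// forward the raw entry
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */
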
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS	(TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

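/*
 * Typical pairing (sketch): a tracefs open handler calls
 * tracing_check_open_get_tr() and the matching release handler drops
 * the reference with trace_array_put(). my_open() is a hypothetical
 * name; the pattern matches how the file operations later in this
 * file use these helpers.
 *
 *	static int my_open(struct inode *inode, struct file *filp)
 *	{
 *		struct trace_array *tr = inode->i_private;
 *		int ret;
 *
 *		ret = tracing_check_open_get_tr(tr);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */
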
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

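/*
 * Wiring sketch: trace_pid_start/next/show are designed to back a
 * struct seq_operations for a pid-filter file, roughly (the my_* hook
 * names are illustrative; real hooks also handle per-file state):
 *
 *	static const struct seq_operations my_pid_sops = {
 *		.start	= my_pid_start,	// wraps trace_pid_start()
 *		.next	= my_pid_next,	// wraps trace_pid_next()
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */
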
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

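/*
 * Usage sketch: a write handler passes the current list and a slot for
 * the new one to trace_pid_write(), then publishes the result with
 * rcu_assign_pointer() (the destination field shown is illustrative):
 *
 *	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->function_pids, pid_list);
 */
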
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (as returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

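/*
 * Usage sketch: readers bracket their buffer access with these
 * helpers, passing either a CPU number or RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu_file);
 *	... peek at or consume events ...
 *	trace_access_unlock(cpu_file);
 */
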
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

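/*
 * Note: callers normally reach __trace_puts()/__trace_bputs() through
 * the trace_puts() macro, which picks __trace_bputs() for build-time
 * constant strings and __trace_puts() otherwise.
 */
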
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer.  You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
1221 * We don't free the ring buffer. instead, resize it because
1222 * The max_tr ring buffer has some state (e.g. ring->clock) and
1223 * we want preserve it.
1224 */
1225 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1226 set_buffer_entries(&tr->max_buffer, 1);
1227 tracing_reset_online_cpus(&tr->max_buffer);
1228 tr->allocated_snapshot = false;
1229}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001230
1231/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001232 * tracing_alloc_snapshot - allocate snapshot buffer.
1233 *
1234 * This only allocates the snapshot buffer if it isn't already
1235 * allocated - it doesn't also take a snapshot.
1236 *
1237 * This is meant to be used in cases where the snapshot buffer needs
1238 * to be set up for events that can't sleep but need to be able to
1239 * trigger a snapshot.
1240 */
1241int tracing_alloc_snapshot(void)
1242{
1243 struct trace_array *tr = &global_trace;
1244 int ret;
1245
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04001246 ret = tracing_alloc_snapshot_instance(tr);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001247 WARN_ON(ret < 0);
1248
1249 return ret;
1250}
1251EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1252
1253/**
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001254 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001255 *
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001256 * This is similar to tracing_snapshot(), but it will allocate the
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001257 * snapshot buffer if it isn't already allocated. Use this only
1258 * where it is safe to sleep, as the allocation may sleep.
1259 *
1260 * This causes a swap between the snapshot buffer and the current live
1261 * tracing buffer. You can use this to take snapshots of the live
1262 * trace when some condition is triggered, but continue to trace.
1263 */
1264void tracing_snapshot_alloc(void)
1265{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001266 int ret;
1267
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001268 ret = tracing_alloc_snapshot();
1269 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001270 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001271
1272 tracing_snapshot();
1273}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001274EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001275
1276/**
1277 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1278 * @tr: The tracing instance
1279 * @cond_data: User data to associate with the snapshot
1280 * @update: Implementation of the cond_snapshot update function
1281 *
1282 * Check whether the conditional snapshot for the given instance has
1283 * already been enabled, or if the current tracer is already using a
1284 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1285 * save the cond_data and update function inside.
1286 *
1287 * Returns 0 if successful, error otherwise.
1288 */
1289int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1290 cond_update_fn_t update)
1291{
1292 struct cond_snapshot *cond_snapshot;
1293 int ret = 0;
1294
1295 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1296 if (!cond_snapshot)
1297 return -ENOMEM;
1298
1299 cond_snapshot->cond_data = cond_data;
1300 cond_snapshot->update = update;
1301
1302 mutex_lock(&trace_types_lock);
1303
1304 ret = tracing_alloc_snapshot_instance(tr);
1305 if (ret)
1306 goto fail_unlock;
1307
1308 if (tr->current_trace->use_max_tr) {
1309 ret = -EBUSY;
1310 goto fail_unlock;
1311 }
1312
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001313 /*
1314 * The cond_snapshot can only change to NULL without the
1315 * trace_types_lock. We don't care if we race with it going
1316 * to NULL, but we want to make sure that it's not set to
1317 * something other than NULL when we get here, which we can
1318 * do safely with only holding the trace_types_lock and not
1319 * having to take the max_lock.
1320 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001321 if (tr->cond_snapshot) {
1322 ret = -EBUSY;
1323 goto fail_unlock;
1324 }
1325
1326 arch_spin_lock(&tr->max_lock);
1327 tr->cond_snapshot = cond_snapshot;
1328 arch_spin_unlock(&tr->max_lock);
1329
1330 mutex_unlock(&trace_types_lock);
1331
1332 return ret;
1333
1334 fail_unlock:
1335 mutex_unlock(&trace_types_lock);
1336 kfree(cond_snapshot);
1337 return ret;
1338}
1339EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1340
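/*
 * Usage sketch (hypothetical callback name): the update function
 * decides, each time the associated trigger fires, whether the
 * snapshot should be taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		// inspect and/or modify cond_data here
 *		return true;	// true means: take the snapshot
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_data, my_update);
 */
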
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:         The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        int ret = 0;

        arch_spin_lock(&tr->max_lock);

        if (!tr->cond_snapshot)
                ret = -EINVAL;
        else {
                kfree(tr->cond_snapshot);
                tr->cond_snapshot = NULL;
        }

        arch_spin_unlock(&tr->max_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
        /* Match the other stubs: report that the feature is not built in */
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_off(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff) that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

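/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * debugging hook can freeze the ring buffers the moment a bad state is
 * detected, so the events leading up to it survive for inspection:
 *
 *      if (data_corruption_detected()) {
 *              tracing_off();
 *              pr_err("corruption hit, trace frozen for post-mortem\n");
 *      }
 *
 * disable_trace_on_warning() below is the in-tree version of this
 * pattern, driven by the traceoff_on_warning command line option.
 */
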
void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning) {
                trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
                                       "Disabling tracing due to warning\n");
                tracing_off();
        }
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to check
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                return ring_buffer_record_is_on(tr->array_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries cannot be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

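/*
 * Usage note (kernel command line; the values here are just examples):
 *
 *      trace_buf_size=4M tracing_thresh=100
 *
 * memparse() accepts the usual K/M/G suffixes for trace_buf_size, and
 * tracing_thresh is given in microseconds (it is stored internally in
 * nanoseconds, hence the multiplication by 1000 above).
 */
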
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
};

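/*
 * Usage note: the names above are what show up in the trace_clock file
 * in tracefs, and selecting one is a plain write, e.g. from a shell:
 *
 *      echo mono > /sys/kernel/tracing/trace_clock
 *
 * "local" is fastest but not guaranteed monotonic across CPUs,
 * "global" pays a synchronization cost to be ordered across CPUs, and
 * "counter" is not a time value at all, just a strictly ordered count.
 */
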
bool trace_clock_in_ns(struct trace_array *tr)
{
        if (trace_clocks[tr->clock_id].in_ns)
                return true;

        return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                parser->idx = 0;

                /* only spaces were written */
                if (isspace(ch) || !ch) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }
        }

        /* read the non-space input */
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch) || !ch) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
                /* Make sure the parsed string always terminates with '\0'. */
                parser->buffer[parser->idx] = 0;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}

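/*
 * Illustrative sketch (hypothetical code, not part of this file): this
 * is how a tracefs write handler typically drives the parser, pulling
 * one whitespace-separated token per call from input such as
 * "echo func_a func_b > set_ftrace_filter". handle_token() is made up:
 *
 *      struct trace_parser parser;
 *      ssize_t read;
 *
 *      if (trace_parser_get_init(&parser, NAME_MAX + 1))
 *              return -ENOMEM;
 *      read = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (read >= 0 && trace_parser_loaded(&parser))
 *              handle_token(parser.buffer);
 *      trace_parser_put(&parser);
 *      return read;
 */
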
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly    tracing_thresh;
static const struct file_operations tracing_max_lat_fops;

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
        defined(CONFIG_FSNOTIFY)

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
        struct trace_array *tr = container_of(work, struct trace_array,
                                              fsnotify_work);
        fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
        struct trace_array *tr = container_of(iwork, struct trace_array,
                                              fsnotify_irqwork);
        queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
                                     struct dentry *d_tracer)
{
        INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
        init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
        tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
                                              d_tracer, &tr->max_latency,
                                              &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
        fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
                                      WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!fsnotify_wq) {
                pr_err("Unable to allocate tr_max_lat_wq\n");
                return -ENOMEM;
        }
        return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
        if (!fsnotify_wq)
                return;
        /*
         * We cannot call queue_work(&tr->fsnotify_work) from here because it's
         * possible that we are called from __schedule() or do_idle(), which
         * could cause a deadlock.
         */
        irq_work_queue(&tr->fsnotify_irqwork);
}

/*
 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
 *  defined(CONFIG_FSNOTIFY)
 */
#else

#define trace_create_maxlat_file(tr, d_tracer)                          \
        trace_create_file("tracing_max_latency", 0644, d_tracer,        \
                          &tr->max_latency, &tracing_max_lat_fops)

#endif

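/*
 * Usage note: the point of the fsnotify plumbing above is that
 * userspace can sleep on modify events instead of polling the file,
 * e.g. with a watcher such as (shell sketch; the path assumes tracefs
 * is mounted there):
 *
 *      inotifywait -m /sys/kernel/tracing/tracing_max_latency
 *
 * Each time a tracer records a new max latency, latency_fsnotify()
 * eventually raises FS_MODIFY on the file and the watcher wakes up.
 */
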
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct array_buffer *trace_buf = &tr->array_buffer;
        struct array_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
        latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: the cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
              void *cond_data)
{
        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from array_buffer */
        if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
                goto out_unlock;
#endif
        swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

        __update_max_tr(tr, tsk, cpu);

 out_unlock:
        arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                                       "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
                                full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
        struct list_head        list;
        struct tracer           *type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
        struct trace_selftests *selftest;

        selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
        if (!selftest)
                return -ENOMEM;

        selftest->type = type;
        list_add(&selftest->list, &postponed_selftests);
        return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * If a tracer registers early in boot up (before scheduling is
         * initialized and such), then do not run its selftests yet.
         * Instead, run it a little later in the boot process.
         */
        if (!selftests_can_run)
                return save_selftest(type);

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->array_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}

static __init int init_trace_selftests(void)
{
        struct trace_selftests *p, *n;
        struct tracer *t, **last;
        int ret;

        selftests_can_run = true;

        mutex_lock(&trace_types_lock);

        if (list_empty(&postponed_selftests))
                goto out;

        pr_info("Running postponed tracer tests:\n");

        tracing_selftest_running = true;
        list_for_each_entry_safe(p, n, &postponed_selftests, list) {
                /*
                 * This loop can take minutes when sanitizers are enabled, so
                 * let's make sure we allow RCU processing.
                 */
                cond_resched();
                ret = run_tracer_selftest(p->type);
                /* If the test fails, then warn and remove from available_tracers */
                if (ret < 0) {
                        WARN(1, "tracer: %s failed selftest, disabling\n",
                             p->type->name);
                        last = &trace_types;
                        for (t = trace_types; t; t = t->next) {
                                if (t == p->type) {
                                        *last = t->next;
                                        break;
                                }
                                last = &t->next;
                        }
                }
                list_del(&p->list);
                kfree(p);
        }
        tracing_selftest_running = false;

 out:
        mutex_unlock(&trace_types_lock);

        return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        if (security_locked_down(LOCKDOWN_TRACEFS)) {
                pr_warn("Can not register tracer %s due to lockdown\n",
                        type->name);
                return -EPERM;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags) {
                /* allocate a dummy tracer_flags */
                type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
                if (!type->flags) {
                        ret = -ENOMEM;
                        goto out;
                }
                type->flags->val = 0;
                type->flags->opts = dummy_tracer_opt;
        } else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;
        add_tracer_options(&global_trace, type);

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;

        apply_trace_boot_options();

        /* disable other selftests, since this will break it. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}

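/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * minimal tracer plugin only has to fill in a name plus init/reset
 * callbacks and register itself during boot (register_tracer() is
 * __init). Everything below except register_tracer() is made up:
 *
 *      static int mytrace_init(struct trace_array *tr)
 *      {
 *              return 0;
 *      }
 *
 *      static void mytrace_reset(struct trace_array *tr)
 *      {
 *      }
 *
 *      static struct tracer mytrace __read_mostly = {
 *              .name   = "mytrace",
 *              .init   = mytrace_init,
 *              .reset  = mytrace_reset,
 *      };
 *
 *      static __init int mytrace_setup(void)
 *      {
 *              return register_tracer(&mytrace);
 *      }
 *      core_initcall(mytrace_setup);
 */
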
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
        struct trace_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_rcu();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
        struct trace_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_rcu();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        ring_buffer_reset_online_cpus(buffer);

        ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (!tr->clear_trace)
                        continue;
                tr->clear_trace = false;
                tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
}

static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
        unsigned *map_cmdline_to_pid;
        unsigned cmdline_num;
        int cmdline_idx;
        char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
        strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
                                    struct saved_cmdlines_buffer *s)
{
        s->map_cmdline_to_pid = kmalloc_array(val,
                                              sizeof(*s->map_cmdline_to_pid),
                                              GFP_KERNEL);
        if (!s->map_cmdline_to_pid)
                return -ENOMEM;

        s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
        if (!s->saved_cmdlines) {
                kfree(s->map_cmdline_to_pid);
                return -ENOMEM;
        }

        s->cmdline_idx = 0;
        s->cmdline_num = val;
        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
               sizeof(s->map_pid_to_cmdline));
        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
               val * sizeof(*s->map_cmdline_to_pid));

        return 0;
}

static int trace_create_savedcmd(void)
{
        int ret;

        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
        if (!savedcmd)
                return -ENOMEM;

        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
        if (ret < 0) {
                kfree(savedcmd);
                savedcmd = NULL;
                return -ENOMEM;
        }

        return 0;
}

int is_tracing_stopped(void)
{
        return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct trace_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
                }
                goto out;
        }

        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);

        buffer = global_trace.array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
#endif

        arch_spin_unlock(&global_trace.max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
        struct trace_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        tr->stop_count = 0;
                }
                goto out;
        }

        buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct trace_buffer *buffer;
        unsigned long flags;

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;

        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);

        buffer = global_trace.array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
#endif

        arch_spin_unlock(&global_trace.max_lock);

 out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

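/*
 * Illustrative sketch (hypothetical code, not part of this file):
 * tracing_stop()/tracing_start() nest via stop_count, so a debug
 * helper can briefly pause recording around a noisy section without
 * worrying about other users of the pair. The middle call is made up:
 *
 *      tracing_stop();
 *      do_noisy_maintenance_work();
 *      tracing_start();
 *
 * Unlike tracing_off(), this is meant for short, paired use; the
 * buffers resume only once every tracing_stop() has been matched by a
 * tracing_start().
 */
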
static void tracing_stop_tr(struct trace_array *tr)
{
        struct trace_buffer *buffer;
        unsigned long flags;

        /* If global, we need to also stop the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_stop();

        raw_spin_lock_irqsave(&tr->start_lock, flags);
        if (tr->stop_count++)
                goto out;

        buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

static int trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        /* treat recording of idle task as a success */
        if (!tsk->pid)
                return 1;

        if (unlikely(tsk->pid > PID_MAX_DEFAULT))
                return 0;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!arch_spin_trylock(&trace_cmdline_lock))
                return 0;

        idx = savedcmd->map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = savedcmd->map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
                savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

                savedcmd->cmdline_idx = idx;
        }

        set_cmdline(idx, tsk->comm);

        arch_spin_unlock(&trace_cmdline_lock);

        return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (WARN_ON_ONCE(pid < 0)) {
                strcpy(comm, "<XXX>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        map = savedcmd->map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
        else
                strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);

        __trace_find_cmdline(pid, comm);

        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

int trace_find_tgid(int pid)
{
        if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
                return 0;

        return tgid_map[pid];
}

static int trace_save_tgid(struct task_struct *tsk)
{
        /* treat recording of idle task as a success */
        if (!tsk->pid)
                return 1;

        if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
                return 0;

        tgid_map[tsk->pid] = tsk->tgid;
        return 1;
}

static bool tracing_record_taskinfo_skip(int flags)
{
        if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
                return true;
        if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
                return true;
        if (!__this_cpu_read(trace_taskinfo_save))
                return true;
        return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task:  task to record
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
        bool done;

        if (tracing_record_taskinfo_skip(flags))
                return;

        /*
         * Record as much task information as possible. If some fail, continue
         * to try to record the others.
         */
        done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

        /* If recording any information failed, retry again soon. */
        if (!done)
                return;

        __this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev: previous task during sched_switch
 * @next: next task during sched_switch
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags)
{
        bool done;

        if (tracing_record_taskinfo_skip(flags))
                return;

        /*
         * Record as much task information as possible. If some fail, continue
         * to try to record the others.
         */
        done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
        done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

        /* If recording any information failed, retry again soon. */
        if (!done)
                return;

        __this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
        tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
        tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
        return trace_seq_has_overflowed(s) ?
                TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

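/*
 * Illustrative sketch (hypothetical code, not part of this file): the
 * helper above lets an event's output callback stay terse. All names
 * other than the trace_seq/trace_handle_return API are made up:
 *
 *      static enum print_line_t
 *      print_myevent(struct trace_iterator *iter, int flags,
 *                    struct trace_event *event)
 *      {
 *              struct trace_seq *s = &iter->seq;
 *
 *              trace_seq_printf(s, "myevent: %d\n", 42);
 *              return trace_handle_return(s);
 *      }
 *
 * trace_handle_return() folds the overflow check that every such
 * callback would otherwise have to open-code.
 */
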
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002580void
Cong Wang46710f32019-05-25 09:57:59 -07002581tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2582 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583{
2584 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585
Steven Rostedt777e2082008-09-29 23:02:42 -04002586 entry->preempt_count = pc & 0xff;
2587 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002588 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002589 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002590#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002591 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002592#else
2593 TRACE_FLAG_IRQS_NOSUPPORT |
2594#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002595 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002596 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302597 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002598 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2599 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002600}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002601EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per-CPU buffers to
 * write the event data into; if the event is filtered and discarded
 * it is simply dropped, otherwise the entire data is committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    __this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}
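/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * enable/disable pair is refcounted and must run under event_mutex,
 * so a filter setup path would bracket its work like:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	...install the filter...
 *	mutex_unlock(&event_mutex);
 *
 * and call trace_buffered_event_disable() (again under event_mutex)
 * when the filter is removed.
 */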

static struct trace_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->array_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
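/*
 * Illustrative sketch (simplified; the generated event code actually
 * goes through the trace_event_buffer_reserve()/trace_event_buffer_commit()
 * wrappers): the reserve/commit pairing looks roughly like:
 *
 *	struct trace_buffer *buffer;
 *	struct ring_buffer_event *event;
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						type, sizeof(*entry),
 *						flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	...fill in the entry fields...
 *	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
 */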

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event_file *file;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	file = fbuffer->trace_file;
	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, fbuffer->entry)))
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}
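/*
 * Usage note (illustrative, not part of this file): when the kernel is
 * booted with the "tp_printk" option (which allocates
 * tracepoint_print_iter), enabled trace events can be mirrored to the
 * console at run time via the sysctl this handler backs:
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 */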

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	if (static_branch_unlikely(&trace_event_exports_enabled))
		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc, fbuffer->regs);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(tr, buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&trace_function_exports_enabled))
			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
		__buffer_unlock_commit(buffer, event);
	}
}

#ifdef CONFIG_STACKTRACE

/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING	4

#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)

struct ftrace_stack {
	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
};


struct ftrace_stacks {
	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
};

static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	unsigned int size, nr_entries;
	struct ftrace_stack *fstack;
	struct stack_entry *entry;
	int stackidx;

	/*
	 * Add one, for this function and the call to save_stack_trace().
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		skip++;
#endif

	preempt_disable_notrace();

	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;

	/* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
		goto out;

	/*
	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
	 * interrupt will either see the value pre increment or post
	 * increment. If the interrupt happens pre increment it will have
	 * restored the counter when it returns. We just need a barrier to
	 * keep gcc from moving things around.
	 */
	barrier();

	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
	size = ARRAY_SIZE(fstack->calls);

	if (regs) {
		nr_entries = stack_trace_save_regs(regs, fstack->calls,
						   size, skip);
	} else {
		nr_entries = stack_trace_save(fstack->calls, size, skip);
	}

	size = nr_entries * sizeof(unsigned long);
	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memcpy(&entry->caller, fstack->calls, size);
	entry->size = nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.array_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
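/*
 * Illustrative sketch (assumed caller, not part of this file): a driver
 * debugging an unexpected code path could record the kernel stack into
 * the trace buffer instead of spamming the console:
 *
 *	if (WARN_ON_ONCE(unexpected))
 *		trace_dump_stack(0);
 *
 * Passing 0 records from the caller itself; a positive @skip drops that
 * many additional helper frames.
 */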

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
		       struct trace_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;

	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long flags, int pc)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
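/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * get/put pair nests safely across interrupt contexts, so users follow
 * the pattern:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		...format into tbuffer (up to TRACE_BUF_SIZE bytes)...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * The four per-CPU slots cover normal, softirq, irq and NMI nesting.
 */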

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.array_buffer.buffer)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
out_put:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
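/*
 * Usage note (illustrative, not part of this file): trace_vbprintk()
 * ultimately backs the trace_printk() fast path when the arguments can
 * be recorded in binary form; a typical debugging call site is just:
 *
 *	trace_printk("ctx %d: state=%lu\n", ctx_id, state);
 *
 * Only the format string pointer and binary-packed arguments are
 * recorded, deferring string formatting until the buffer is read.
 */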

__printf(3, 0)
static int
__trace_array_vprintk(struct trace_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}

/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, they have the right to
 * printk strings into their tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as writing into the top level
 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should never
 * be incorporated in normal use.
 *
 * trace_array_printk() can be used, as it will not add noise to the
 * top level tracing buffer.
 *
 * Note, trace_array_init_printk() must be called on @tr before this
 * can be used.
 */
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return 0;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_printk);
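/*
 * Illustrative sketch (assumed caller, not part of this file; the
 * instance name and message are made up): a subsystem with its own
 * tracing instance would typically do:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_subsys");
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "probe %d ready\n", id);
 */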

/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, they are OK to
 * have in the kernel (unlike trace_printk()). This needs to be called
 * before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
{
	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return -EINVAL;

	return alloc_percpu_trace_buffer();
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);

__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_iter_advance(buf_iter);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter) {
		event = ring_buffer_iter_peek(buf_iter, ts);
		if (lost_events)
			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
				(unsigned long)-1 : 0;
	} else {
		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
					 lost_events);
	}

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct trace_buffer *buffer = iter->array_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all CPUs; peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

#define STATIC_TEMP_BUF_SIZE	128
static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	/* __find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_entry *entry;

	/*
	 * If called from ftrace_dump(), then the iter->temp buffer
	 * will be the static_temp_buf and not created from kmalloc.
	 * If the entry size is greater than the buffer, we can
	 * not save it. Just return NULL in that case. This is only
	 * used to add markers when two consecutive events' time
	 * stamps have a large delta. See trace_print_lat_context().
	 */
	if (iter->temp == static_temp_buf &&
	    STATIC_TEMP_BUF_SIZE < ent_size)
		return NULL;

	/*
	 * The __find_next_entry() may call peek_next_entry(), which may
	 * call ring_buffer_peek() that may make the contents of iter->ent
	 * undefined. Need to copy iter->ent now.
	 */
	if (iter->ent && iter->ent != iter->temp) {
		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
			void *temp;
			temp = kmalloc(iter->ent_size, GFP_KERNEL);
			if (!temp)
				return NULL;
			kfree(iter->temp);
			iter->temp = temp;
			iter->temp_size = iter->ent_size;
		}
		memcpy(iter->temp, iter->ent, iter->ent_size);
		iter->ent = iter->temp;
	}
	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
	/* Put back the original ent_size */
	iter->ent_size = ent_size;

	return entry;
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while (ring_buffer_iter_peek(buf_iter, &ts)) {
		if (ts >= iter->array_buffer->time_start)
			break;
		entries++;
		ring_buffer_iter_advance(buf_iter);
	}

	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_taskinfo_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_taskinfo_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
3742
static void
get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
		      unsigned long *entries, int cpu)
{
	unsigned long count;

	count = ring_buffer_entries_cpu(buf->buffer, cpu);
	/*
	 * If this buffer has skipped entries, then we hold all
	 * entries for the trace and we need to ignore the
	 * ones before the time stamp.
	 */
	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
		/* total is the same as the entries */
		*total = count;
	} else
		*total = count +
			ring_buffer_overrun_cpu(buf->buffer, cpu);
	*entries = count;
}

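/* Sum the per-CPU entry counts across every tracing CPU. */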
static void
get_total_entries(struct array_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long t, e;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		get_total_entries_cpu(buf, &t, &e, cpu);
		*total += t;
		*entries += e;
	}
}

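/*
 * Number of entries still readable on @cpu of @tr (the top-level trace
 * array when @tr is NULL); overrun events are not included.
 */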
unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
{
	unsigned long total, entries;

	if (!tr)
		tr = &global_trace;

	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);

	return entries;
}

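/* As above, but summed over all tracing CPUs. */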
unsigned long trace_total_entries(struct trace_array *tr)
{
	unsigned long total, entries;

	if (!tr)
		tr = &global_trace;

	get_total_entries(&tr->array_buffer, &total, &entries);

	return entries;
}

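/*
 * Column legend printed above latency-format output; the slashes and
 * bars line up with the flag fields of each trace line below it.
 */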
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                    _------=> CPU#            \n"
		    "#                   / _-----=> irqs-off        \n"
		    "#                  | / _----=> need-resched    \n"
		    "#                  || / _---=> hardirq/softirq \n"
		    "#                  ||| / _--=> preempt-depth   \n"
		    "#                  |||| /     delay            \n"
		    "#  cmd     pid     ||||| time  |   caller      \n"
		    "#     \\   /        |||||  \\    |   /         \n");
}

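/* Buffer fill statistics shared by all of the header variants. */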
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
	seq_printf(m, "#              | |      %s   |         |         |\n", tgid ? "     |    " : "");
}

static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char *space = "            ";
	int prec = tgid ? 12 : 2;

	print_event_info(buf, m);

	seq_printf(m, "# %.*s  _-----=> irqs-off\n", prec, space);
	seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
	seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
	seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
	seq_printf(m, "# %.*s||| /     delay\n", prec, space);
	seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
	seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "       |    ");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct array_buffer *buf = iter->array_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#elif defined(CONFIG_PREEMPT_RT)
		   "preempt_rt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

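/*
 * With the "annotate" option set, flag the first event that a CPU
 * contributes to the output so readers know earlier events from that
 * CPU may have been dropped.
 */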
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

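/*
 * The default (and latency-format) rendering of a single event,
 * dispatched through the event's registered trace_event callbacks.
 */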
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

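/* Return 1 if the buffer(s) this iterator covers have nothing left to read. */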
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		if (iter->lost_events == (unsigned long)-1)
			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
					 iter->cpu);
		else
			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
					 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->array_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->array_buffer, m,
						       trace_flags);
		}
	}
}

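/*
 * Warn in the header when function tracing has shut itself down;
 * function events may be missing from the output.
 */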
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

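/*
 * Set up a trace_iterator for reading: take a private copy of the
 * current tracer, point the iterator at the right buffer (the max
 * buffer when dumping a snapshot), optionally pause tracing, and
 * prepare a ring buffer iterator for each CPU of interest.
 */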
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * trace_find_next_entry() may need to save off iter->ent.
	 * It will place it into the iter->temp buffer. As most
	 * events are less than 128, allocate a buffer of that size.
	 * If one is greater, then trace_find_next_entry() will
	 * allocate a new buffer to adjust for the bigger iter->ent.
	 * It's not critical if it fails to get allocated here.
	 */
	iter->temp = kmalloc(128, GFP_KERNEL);
	if (iter->temp)
		iter->temp_size = 128;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->array_buffer = &tr->max_buffer;
	else
#endif
		iter->array_buffer = &tr->array_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/*
	 * If pause-on-trace is enabled, then stop the trace while
	 * dumping, unless this is the "snapshot" file
	 */
	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->array_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->array_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->temp);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

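/*
 * Generic open: verify tracing is accessible, then stash i_private
 * for the read/write handlers.
 */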
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return tracing_disabled;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;

	return 0;
}

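/*
 * Release for the "trace" file: finish the per-CPU ring buffer
 * iterators, give the tracer its close callback, restart tracing if
 * the open stopped it, and drop the trace_array reference.
 */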
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot && tr->stop_count)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->temp);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct array_buffer *trace_buf = &tr->array_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset_cpu(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

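/*
 * seq_file iterator over the registered tracers, filtered to those
 * this trace array may use; t_show() emits the names space-separated.
 */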
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret) {
		trace_array_put(tr);
		return ret;
	}

	m = file->private_data;
	m->private = tr;

	return 0;
}

static int show_traces_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return seq_release(inode, file);
}

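/*
 * Writes to the "trace" file are discarded; opening it with O_TRUNC
 * is what clears the buffer (see tracing_open() above).
 */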
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= show_traces_release,
};

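/*
 * Read handler for the instance's tracing_cpumask file: print the
 * current mask as a CPU bitmap string.
 */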
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

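/*
 * Switch @tr to a new tracing cpumask: recording is disabled on CPUs
 * leaving the mask and re-enabled on CPUs entering it, under max_lock
 * with interrupts off, before the new mask is copied in.
 */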
int tracing_set_cpumask(struct trace_array *tr,
			cpumask_var_t tracing_cpumask_new)
{
	int cpu;

	if (!tr)
		return -EINVAL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	return 0;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_free;

	err = tracing_set_cpumask(tr, tracing_cpumask_new);
	if (err)
		goto err_free;

	free_cpumask_var(tracing_cpumask_new);

	return count;

err_free:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

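/*
 * Show every option for the trace_options file: the core flags first,
 * then the current tracer's private flags, each prefixed with "no"
 * when clear.
 */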
Li Zefanfdb372e2009-12-08 11:15:59 +08004788static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004789{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004790 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004791 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004792 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004793 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004794
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004795 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004796 tracer_flags = tr->current_trace->flags->val;
4797 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004798
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004799 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004800 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004801 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004802 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004803 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004804 }
4805
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004806 for (i = 0; trace_opts[i].name; i++) {
4807 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004808 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004809 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004810 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004811 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004812 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004813
Li Zefanfdb372e2009-12-08 11:15:59 +08004814 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004815}
4816
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004817static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004818 struct tracer_flags *tracer_flags,
4819 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004820{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004821 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004822 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004823
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004824 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004825 if (ret)
4826 return ret;
4827
4828 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004829 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004830 else
Zhaolei77708412009-08-07 18:53:21 +08004831 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004832 return 0;
4833}
4834
Li Zefan8d18eaa2009-12-08 11:17:06 +08004835/* Try to assign a tracer-specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004836static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004837{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004838 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004839 struct tracer_flags *tracer_flags = trace->flags;
4840 struct tracer_opt *opts = NULL;
4841 int i;
4842
4843 for (i = 0; tracer_flags->opts[i].name; i++) {
4844 opts = &tracer_flags->opts[i];
4845
4846 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004847 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004848 }
4849
4850 return -EINVAL;
4851}
4852
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004853/* Some tracers require overwrite to stay enabled */
4854int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4855{
4856 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4857 return -1;
4858
4859 return 0;
4860}
4861
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004862int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004863{
Prateek Sood3a53acf2019-12-10 09:15:16 +00004864 if ((mask == TRACE_ITER_RECORD_TGID) ||
4865 (mask == TRACE_ITER_RECORD_CMD))
4866 lockdep_assert_held(&event_mutex);
4867
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004868 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004869 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004870 return 0;
4871
4872 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004873 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004874 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004875 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004876
4877 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004878 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004879 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004880 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004881
4882 if (mask == TRACE_ITER_RECORD_CMD)
4883 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004884
Joel Fernandesd914ba32017-06-26 19:01:55 -07004885 if (mask == TRACE_ITER_RECORD_TGID) {
4886 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004887 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004888 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004889 GFP_KERNEL);
4890 if (!tgid_map) {
4891 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4892 return -ENOMEM;
4893 }
4894
4895 trace_event_enable_tgid_record(enabled);
4896 }
4897
Steven Rostedtc37775d2016-04-13 16:59:18 -04004898 if (mask == TRACE_ITER_EVENT_FORK)
4899 trace_event_follow_fork(tr, enabled);
4900
Namhyung Kim1e104862017-04-17 11:44:28 +09004901 if (mask == TRACE_ITER_FUNC_FORK)
4902 ftrace_pid_follow_fork(tr, enabled);
4903
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004904 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004905 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004906#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004907 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004908#endif
4909 }
Steven Rostedt81698832012-10-11 10:15:05 -04004910
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004911 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004912 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004913 trace_printk_control(enabled);
4914 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004915
4916 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004917}
4918
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09004919int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004920{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004921 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004922 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004923 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004924 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004925 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004926
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004927 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004928
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004929 len = str_has_prefix(cmp, "no");
4930 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004931 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004932
4933 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004934
Prateek Sood3a53acf2019-12-10 09:15:16 +00004935 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004936 mutex_lock(&trace_types_lock);
4937
Yisheng Xie591a0332018-05-17 16:36:03 +08004938 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004939 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004940 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004941 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004942 else
4943 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004944
4945 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00004946 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004947
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004948 /*
4949 * If the first trailing whitespace is replaced with '\0' by strstrip,
4950 * turn it back into a space.
4951 */
4952 if (orig_len > strlen(option))
4953 option[strlen(option)] = ' ';
4954
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004955 return ret;
4956}
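
/*
 * A userspace model of the parse above: strip the whitespace, peel an
 * optional "no" prefix, and hand back the bare flag name plus its
 * polarity. The helper mimics the kernel's str_has_prefix(); the
 * helper name and the sample input are illustrative only.
 */
#include <stdio.h>
#include <string.h>

/* Return the prefix length on a match, 0 otherwise. */
static size_t has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	char option[] = "  noprint-parent  ";
	char *cmp = option;
	size_t len;
	int neg = 0;

	/* poor man's strstrip(): drop leading and trailing whitespace */
	while (*cmp == ' ')
		cmp++;
	cmp[strcspn(cmp, " ")] = '\0';

	len = has_prefix(cmp, "no");
	if (len)
		neg = 1;
	cmp += len;

	printf("flag=%s neg=%d\n", cmp, neg);	/* flag=print-parent neg=1 */
	return 0;
}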
4957
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004958static void __init apply_trace_boot_options(void)
4959{
4960 char *buf = trace_boot_options_buf;
4961 char *option;
4962
4963 while (true) {
4964 option = strsep(&buf, ",");
4965
4966 if (!option)
4967 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004968
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004969 if (*option)
4970 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004971
4972 /* Put back the comma to allow this to be called again */
4973 if (buf)
4974 *(buf - 1) = ',';
4975 }
4976}
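
/*
 * A standalone model of the walk above. strsep() (a BSD extension that
 * glibc ships in <string.h>) destructively replaces each separator with
 * '\0'; putting the comma back afterwards is what lets the same buffer
 * be parsed again on a later call. The option names are made up.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char boot_options[] = "trace_printk,sym-addr";
	char *buf = boot_options;
	char *option;

	for (;;) {
		option = strsep(&buf, ",");
		if (!option)
			break;
		if (*option)
			printf("option: %s\n", option);
		/* put back the comma so the buffer survives the walk */
		if (buf)
			*(buf - 1) = ',';
	}

	printf("buffer intact: %s\n", boot_options);	/* unchanged */
	return 0;
}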
4977
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004978static ssize_t
4979tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4980 size_t cnt, loff_t *ppos)
4981{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004982 struct seq_file *m = filp->private_data;
4983 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004984 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004985 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004986
4987 if (cnt >= sizeof(buf))
4988 return -EINVAL;
4989
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004990 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004991 return -EFAULT;
4992
Steven Rostedta8dd2172013-01-09 20:54:17 -05004993 buf[cnt] = 0;
4994
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004995 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004996 if (ret < 0)
4997 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004998
Jiri Olsacf8517c2009-10-23 19:36:16 -04004999 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005000
5001 return cnt;
5002}
5003
Li Zefanfdb372e2009-12-08 11:15:59 +08005004static int tracing_trace_options_open(struct inode *inode, struct file *file)
5005{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005006 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005007 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005008
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005009 ret = tracing_check_open_get_tr(tr);
5010 if (ret)
5011 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005012
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005013 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5014 if (ret < 0)
5015 trace_array_put(tr);
5016
5017 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005018}
5019
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005020static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005021 .open = tracing_trace_options_open,
5022 .read = seq_read,
5023 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005024 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005025 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005026};
5027
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005028static const char readme_msg[] =
5029 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005030 "# echo 0 > tracing_on : quick way to disable tracing\n"
5031 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5032 " Important files:\n"
5033 " trace\t\t\t- The static contents of the buffer\n"
5034 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5035 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5036 " current_tracer\t- function and latency tracers\n"
5037 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005038 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005039 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5040 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5041 " trace_clock\t\t-change the clock used to order events\n"
5042 " local: Per cpu clock but may not be synced across CPUs\n"
5043 " global: Synced across CPUs but slows tracing down.\n"
5044 " counter: Not a clock, but just an increment\n"
5045 " uptime: Jiffy counter from time of boot\n"
5046 " perf: Same clock that perf events use\n"
5047#ifdef CONFIG_X86_64
5048 " x86-tsc: TSC cycle counter\n"
5049#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005050 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5051 " delta: Delta difference against a buffer-wide timestamp\n"
5052 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005053 "\n trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005054 "\n trace_marker_raw\t\t- Writing into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005055 " tracing_cpumask\t- Limit which CPUs to trace\n"
5056 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5057 "\t\t\t Remove sub-buffer with rmdir\n"
5058 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005059 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005060 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005061 " saved_cmdlines_size\t- echo the number of comm-pid entries to keep in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005062#ifdef CONFIG_DYNAMIC_FTRACE
5063 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005064 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5065 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005066 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005067 "\t modules: Can select a group via module\n"
5068 "\t Format: :mod:<module-name>\n"
5069 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5070 "\t triggers: a command to perform when function is hit\n"
5071 "\t Format: <function>:<trigger>[:count]\n"
5072 "\t trigger: traceon, traceoff\n"
5073 "\t\t enable_event:<system>:<event>\n"
5074 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005075#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005076 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005077#endif
5078#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005079 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005080#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005081 "\t\t dump\n"
5082 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005083 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5084 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5085 "\t The first one will disable tracing every time do_fault is hit\n"
5086 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5087 "\t The first time do trap is hit and it disables tracing, the\n"
5088 "\t counter will decrement to 2. If tracing is already disabled,\n"
5089 "\t the counter will not decrement. It only decrements when the\n"
5090 "\t trigger did work\n"
5091 "\t To remove trigger without count:\n"
5092 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5093 "\t To remove trigger with a count:\n"
5094 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005095 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005096 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5097 "\t modules: Can select a group via module command :mod:\n"
5098 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005099#endif /* CONFIG_DYNAMIC_FTRACE */
5100#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005101 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5102 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005103 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5104 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005105#endif
5106#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5107 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005108 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005109 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5110#endif
5111#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005112 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5113 "\t\t\t snapshot buffer. Read the contents for more\n"
5114 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005115#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005116#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005117 " stack_trace\t\t- Shows the max stack trace when active\n"
5118 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005119 "\t\t\t Write into this file to reset the max size (trigger a\n"
5120 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005121#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005122 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5123 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005124#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005125#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005126#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005127 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005128 "\t\t\t Write into this file to define/undefine new trace events.\n"
5129#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005130#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005131 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005132 "\t\t\t Write into this file to define/undefine new trace events.\n"
5133#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005134#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005135 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005136 "\t\t\t Write into this file to define/undefine new trace events.\n"
5137#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005138#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005139 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005140 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5141 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005142#ifdef CONFIG_HIST_TRIGGERS
5143 "\t s:[synthetic/]<event> <field> [<field>]\n"
5144#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005145 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005146#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005147 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu4725cd82020-09-10 17:55:35 +09005148 " place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005149#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005150#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu3dd3aae2020-09-10 17:55:46 +09005151 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005152#endif
5153 "\t args: <name>=fetcharg[:type]\n"
5154 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005155#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005156 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005157#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005158 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005159#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005160 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005161 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005162 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005163 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005164#ifdef CONFIG_HIST_TRIGGERS
5165 "\t field: <stype> <name>;\n"
5166 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5167 "\t [unsigned] char/int/long\n"
5168#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005169#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005170 " events/\t\t- Directory containing all trace event subsystems:\n"
5171 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5172 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005173 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5174 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005175 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005176 " events/<system>/<event>/\t- Directory containing control files for\n"
5177 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005178 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5179 " filter\t\t- If set, only events passing filter are traced\n"
5180 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005181 "\t Format: <trigger>[:count][if <filter>]\n"
5182 "\t trigger: traceon, traceoff\n"
5183 "\t enable_event:<system>:<event>\n"
5184 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005185#ifdef CONFIG_HIST_TRIGGERS
5186 "\t enable_hist:<system>:<event>\n"
5187 "\t disable_hist:<system>:<event>\n"
5188#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005189#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005190 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005191#endif
5192#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005193 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005194#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005195#ifdef CONFIG_HIST_TRIGGERS
5196 "\t\t hist (see below)\n"
5197#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005198 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5199 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5200 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5201 "\t events/block/block_unplug/trigger\n"
5202 "\t The first disables tracing every time block_unplug is hit.\n"
5203 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5204 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5205 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5206 "\t Like function triggers, the counter is only decremented if it\n"
5207 "\t enabled or disabled tracing.\n"
5208 "\t To remove a trigger without a count:\n"
5209 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5210 "\t To remove a trigger with a count:\n"
5211 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5212 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005213#ifdef CONFIG_HIST_TRIGGERS
5214 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005215 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005216 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005217 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005218 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005219 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005220 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005221 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005222 "\t [if <filter>]\n\n"
5223 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005224 "\t table using the key(s) and value(s) named, and the value of a\n"
5225 "\t sum called 'hitcount' is incremented. Keys and values\n"
5226 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005227 "\t can be any field, or the special string 'stacktrace'.\n"
5228 "\t Compound keys consisting of up to two fields can be specified\n"
5229 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5230 "\t fields. Sort keys consisting of up to two fields can be\n"
5231 "\t specified using the 'sort' keyword. The sort direction can\n"
5232 "\t be modified by appending '.descending' or '.ascending' to a\n"
5233 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005234 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5235 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5236 "\t its histogram data will be shared with other triggers of the\n"
5237 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005238 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005239 "\t table in its entirety to stdout. If there are multiple hist\n"
5240 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005241 "\t trigger in the output. The table displayed for a named\n"
5242 "\t trigger will be the same as any other instance having the\n"
5243 "\t same name. The default format used to display a given field\n"
5244 "\t can be modified by appending any of the following modifiers\n"
5245 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005246 "\t .hex display a number as a hex value\n"
5247 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005248 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005249 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005250 "\t .syscall display a syscall id as a syscall name\n"
5251 "\t .log2 display log2 value rather than raw number\n"
5252 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005253 "\t The 'pause' parameter can be used to pause an existing hist\n"
5254 "\t trigger or to start a hist trigger but not log any events\n"
5255 "\t until told to do so. 'continue' can be used to start or\n"
5256 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005257 "\t The 'clear' parameter will clear the contents of a running\n"
5258 "\t hist trigger and leave its current paused/active state\n"
5259 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005260 "\t The enable_hist and disable_hist triggers can be used to\n"
5261 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005262 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005263 "\t the enable_event and disable_event triggers.\n\n"
5264 "\t Hist trigger handlers and actions are executed whenever a\n"
5265 "\t a histogram entry is added or updated. They take the form:\n\n"
5266 "\t <handler>.<action>\n\n"
5267 "\t The available handlers are:\n\n"
5268 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005269 "\t onmax(var) - invoke if var exceeds current max\n"
5270 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005271 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005272 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005273 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005274#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussi1bc36bd2020-10-04 17:14:07 -05005275 "\t snapshot() - snapshot the trace buffer\n\n"
5276#endif
5277#ifdef CONFIG_SYNTH_EVENTS
5278 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5279 "\t Write into this file to define/undefine new synthetic events.\n"
5280 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005281#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005282#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005283;
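
/*
 * The mini-HOWTO above is written for the shell; the same steps can be
 * driven from C. A minimal sketch, assuming tracefs is mounted at
 * /sys/kernel/tracing (older systems expose it under
 * /sys/kernel/debug/tracing) and that the caller has permission:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_tracefs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* echo 1 > tracing_on : re-enable tracing */
	write_tracefs("/sys/kernel/tracing/tracing_on", "1");

	/* echo function > current_tracer : pick a tracer */
	write_tracefs("/sys/kernel/tracing/current_tracer", "function");

	/* echo hello > trace_marker : annotate the buffer */
	write_tracefs("/sys/kernel/tracing/trace_marker", "hello\n");

	/* echo > trace : the clear happens on open with O_TRUNC */
	close(open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC));

	return 0;
}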
5284
5285static ssize_t
5286tracing_readme_read(struct file *filp, char __user *ubuf,
5287 size_t cnt, loff_t *ppos)
5288{
5289 return simple_read_from_buffer(ubuf, cnt, ppos,
5290 readme_msg, strlen(readme_msg));
5291}
5292
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005293static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005294 .open = tracing_open_generic,
5295 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005296 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005297};
5298
Michael Sartain99c621d2017-07-05 22:07:15 -06005299static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5300{
5301 int *ptr = v;
5302
5303 if (*pos || m->count)
5304 ptr++;
5305
5306 (*pos)++;
5307
5308 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5309 if (trace_find_tgid(*ptr))
5310 return ptr;
5311 }
5312
5313 return NULL;
5314}
5315
5316static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5317{
5318 void *v;
5319 loff_t l = 0;
5320
5321 if (!tgid_map)
5322 return NULL;
5323
5324 v = &tgid_map[0];
5325 while (l <= *pos) {
5326 v = saved_tgids_next(m, v, &l);
5327 if (!v)
5328 return NULL;
5329 }
5330
5331 return v;
5332}
5333
5334static void saved_tgids_stop(struct seq_file *m, void *v)
5335{
5336}
5337
5338static int saved_tgids_show(struct seq_file *m, void *v)
5339{
5340 int pid = (int *)v - tgid_map;
5341
5342 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5343 return 0;
5344}
5345
5346static const struct seq_operations tracing_saved_tgids_seq_ops = {
5347 .start = saved_tgids_start,
5348 .stop = saved_tgids_stop,
5349 .next = saved_tgids_next,
5350 .show = saved_tgids_show,
5351};
5352
5353static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5354{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005355 int ret;
5356
5357 ret = tracing_check_open_get_tr(NULL);
5358 if (ret)
5359 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005360
5361 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5362}
5363
5364
5365static const struct file_operations tracing_saved_tgids_fops = {
5366 .open = tracing_saved_tgids_open,
5367 .read = seq_read,
5368 .llseek = seq_lseek,
5369 .release = seq_release,
5370};
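
/*
 * The tgid file above is a textbook seq_file user: ->start() positions
 * the cursor for *pos, ->next() advances it, ->show() prints one
 * record, and ->stop() undoes whatever ->start() took. A minimal
 * sketch of that contract over a static array (names invented, module
 * boilerplate and error handling trimmed):
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int demo_vals[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};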
5371
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005372static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005373{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005374 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005375
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005376 if (*pos || m->count)
5377 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005378
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005379 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005380
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005381 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5382 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005383 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005384 continue;
5385
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005386 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005387 }
5388
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005389 return NULL;
5390}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005391
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005392static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5393{
5394 void *v;
5395 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005396
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005397 preempt_disable();
5398 arch_spin_lock(&trace_cmdline_lock);
5399
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005400 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005401 while (l <= *pos) {
5402 v = saved_cmdlines_next(m, v, &l);
5403 if (!v)
5404 return NULL;
5405 }
5406
5407 return v;
5408}
5409
5410static void saved_cmdlines_stop(struct seq_file *m, void *v)
5411{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005412 arch_spin_unlock(&trace_cmdline_lock);
5413 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005414}
5415
5416static int saved_cmdlines_show(struct seq_file *m, void *v)
5417{
5418 char buf[TASK_COMM_LEN];
5419 unsigned int *pid = v;
5420
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005421 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005422 seq_printf(m, "%d %s\n", *pid, buf);
5423 return 0;
5424}
5425
5426static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5427 .start = saved_cmdlines_start,
5428 .next = saved_cmdlines_next,
5429 .stop = saved_cmdlines_stop,
5430 .show = saved_cmdlines_show,
5431};
5432
5433static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5434{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005435 int ret;
5436
5437 ret = tracing_check_open_get_tr(NULL);
5438 if (ret)
5439 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005440
5441 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005442}
5443
5444static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005445 .open = tracing_saved_cmdlines_open,
5446 .read = seq_read,
5447 .llseek = seq_lseek,
5448 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005449};
5450
5451static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005452tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5453 size_t cnt, loff_t *ppos)
5454{
5455 char buf[64];
5456 int r;
5457
5458 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005459 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005460 arch_spin_unlock(&trace_cmdline_lock);
5461
5462 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5463}
5464
5465static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5466{
5467 kfree(s->saved_cmdlines);
5468 kfree(s->map_cmdline_to_pid);
5469 kfree(s);
5470}
5471
5472static int tracing_resize_saved_cmdlines(unsigned int val)
5473{
5474 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5475
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005476 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005477 if (!s)
5478 return -ENOMEM;
5479
5480 if (allocate_cmdlines_buffer(val, s) < 0) {
5481 kfree(s);
5482 return -ENOMEM;
5483 }
5484
5485 arch_spin_lock(&trace_cmdline_lock);
5486 savedcmd_temp = savedcmd;
5487 savedcmd = s;
5488 arch_spin_unlock(&trace_cmdline_lock);
5489 free_saved_cmdlines_buffer(savedcmd_temp);
5490
5491 return 0;
5492}
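
/*
 * The resize above is a lock-scoped pointer swap: build the
 * replacement off to the side, publish it while holding the lock, and
 * free the old copy only after dropping the lock, so readers never see
 * a half-built table. A generic userspace analogue, with a pthread
 * mutex standing in for the arch spinlock and an invented table type:
 */
#include <pthread.h>
#include <stdlib.h>

struct table {
	size_t len;
	int *slots;
};

static struct table *current_table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int resize_table(size_t len)
{
	struct table *s, *old;

	s = malloc(sizeof(*s));
	if (!s)
		return -1;
	s->slots = calloc(len, sizeof(*s->slots));
	if (!s->slots) {
		free(s);
		return -1;
	}
	s->len = len;

	pthread_mutex_lock(&table_lock);
	old = current_table;		/* publish the new table */
	current_table = s;
	pthread_mutex_unlock(&table_lock);

	if (old) {			/* reclaim outside the lock */
		free(old->slots);
		free(old);
	}
	return 0;
}

int main(void)
{
	return resize_table(128);
}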
5493
5494static ssize_t
5495tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497{
5498 unsigned long val;
5499 int ret;
5500
5501 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5502 if (ret)
5503 return ret;
5504
5505 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5506 if (!val || val > PID_MAX_DEFAULT)
5507 return -EINVAL;
5508
5509 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5510 if (ret < 0)
5511 return ret;
5512
5513 *ppos += cnt;
5514
5515 return cnt;
5516}
5517
5518static const struct file_operations tracing_saved_cmdlines_size_fops = {
5519 .open = tracing_open_generic,
5520 .read = tracing_saved_cmdlines_size_read,
5521 .write = tracing_saved_cmdlines_size_write,
5522};
5523
Jeremy Linton681bec02017-05-31 16:56:53 -05005524#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005525static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005526update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005527{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005528 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005529 if (ptr->tail.next) {
5530 ptr = ptr->tail.next;
5531 /* Set ptr to the next real item (skip head) */
5532 ptr++;
5533 } else
5534 return NULL;
5535 }
5536 return ptr;
5537}
5538
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005539static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005540{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005541 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005542
5543 /*
5544 * Paranoid! If ptr points to end, we don't want to increment past it.
5545 * This really should never happen.
5546 */
Vasily Averin039958a2020-01-24 10:03:01 +03005547 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005548 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005549 if (WARN_ON_ONCE(!ptr))
5550 return NULL;
5551
5552 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005553 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005554
5555 return ptr;
5556}
5557
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005558static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005559{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005560 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005561 loff_t l = 0;
5562
Jeremy Linton1793ed92017-05-31 16:56:46 -05005563 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005564
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005565 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005566 if (v)
5567 v++;
5568
5569 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005570 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005571 }
5572
5573 return v;
5574}
5575
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005576static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005577{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005578 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005579}
5580
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005581static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005582{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005583 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005584
5585 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005586 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005587 ptr->map.system);
5588
5589 return 0;
5590}
5591
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005592static const struct seq_operations tracing_eval_map_seq_ops = {
5593 .start = eval_map_start,
5594 .next = eval_map_next,
5595 .stop = eval_map_stop,
5596 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005597};
5598
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005599static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005600{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005601 int ret;
5602
5603 ret = tracing_check_open_get_tr(NULL);
5604 if (ret)
5605 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005606
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005607 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005608}
5609
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005610static const struct file_operations tracing_eval_map_fops = {
5611 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005612 .read = seq_read,
5613 .llseek = seq_lseek,
5614 .release = seq_release,
5615};
5616
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005617static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005618trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005619{
5620 /* Return tail of array given the head */
5621 return ptr + ptr->head.length + 1;
5622}
5623
5624static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005625trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005626 int len)
5627{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005628 struct trace_eval_map **stop;
5629 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005630 union trace_eval_map_item *map_array;
5631 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005632
5633 stop = start + len;
5634
5635 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005636 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005637 * where the head holds the module and length of array, and the
5638 * tail holds a pointer to the next list.
5639 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005640 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005641 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005642 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005643 return;
5644 }
5645
Jeremy Linton1793ed92017-05-31 16:56:46 -05005646 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005647
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005648 if (!trace_eval_maps)
5649 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005650 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005651 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005652 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005653 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005654 if (!ptr->tail.next)
5655 break;
5656 ptr = ptr->tail.next;
5657
5658 }
5659 ptr->tail.next = map_array;
5660 }
5661 map_array->head.mod = mod;
5662 map_array->head.length = len;
5663 map_array++;
5664
5665 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5666 map_array->map = **map;
5667 map_array++;
5668 }
5669 memset(map_array, 0, sizeof(*map_array));
5670
Jeremy Linton1793ed92017-05-31 16:56:46 -05005671 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005672}
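
/*
 * A toy model of the layout built above: each saved block is one array
 * of union items, with item 0 (the head) describing the block and the
 * item after the last map (the tail) chaining to the next block. The
 * fields are simplified stand-ins for union trace_eval_map_item:
 */
#include <stddef.h>
#include <stdio.h>

union item {
	struct { size_t length; } head;		/* item 0 of a block */
	struct { const char *str; long val; } map;
	struct { union item *next; } tail;	/* item length + 1 */
};

static size_t count_maps(union item *ptr)
{
	size_t n = 0;

	while (ptr) {
		n += ptr->head.length;
		/* jump from head to tail, then follow the chain */
		ptr = ptr[ptr->head.length + 1].tail.next;
	}
	return n;
}

int main(void)
{
	/* one block: head, two maps, tail with no successor */
	union item block[4] = {
		{ .head = { .length = 2 } },
		{ .map  = { "ENUM_A", 1 } },
		{ .map  = { "ENUM_B", 2 } },
		{ .tail = { NULL } },
	};

	printf("%zu maps\n", count_maps(block));	/* 2 maps */
	return 0;
}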
5673
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005674static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005675{
Jeremy Linton681bec02017-05-31 16:56:53 -05005676 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005677 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005678}
5679
Jeremy Linton681bec02017-05-31 16:56:53 -05005680#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005681static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5682static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005683 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005684#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005685
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005686static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005687 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005688{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005689 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005690
5691 if (len <= 0)
5692 return;
5693
5694 map = start;
5695
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005696 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005697
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005698 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005699}
5700
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005701static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005702tracing_set_trace_read(struct file *filp, char __user *ubuf,
5703 size_t cnt, loff_t *ppos)
5704{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005705 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005706 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005707 int r;
5708
5709 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005710 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005711 mutex_unlock(&trace_types_lock);
5712
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005713 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005714}
5715
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005716int tracer_init(struct tracer *t, struct trace_array *tr)
5717{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005718 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005719 return t->init(tr);
5720}
5721
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005722static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005723{
5724 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005725
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005726 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005727 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005728}
5729
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005730#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005731/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005732static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5733 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005734{
5735 int cpu, ret = 0;
5736
5737 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5738 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005739 ret = ring_buffer_resize(trace_buf->buffer,
5740 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005741 if (ret < 0)
5742 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005743 per_cpu_ptr(trace_buf->data, cpu)->entries =
5744 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005745 }
5746 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005747 ret = ring_buffer_resize(trace_buf->buffer,
5748 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005749 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005750 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5751 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005752 }
5753
5754 return ret;
5755}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005756#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005757
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005758static int __tracing_resize_ring_buffer(struct trace_array *tr,
5759 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005760{
5761 int ret;
5762
5763 /*
5764 * If the kernel or a user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04005765 * we use the size that was given, and we can forget about
5766 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005767 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005768 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005769
Steven Rostedtb382ede62012-10-10 21:44:34 -04005770 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005771 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005772 return 0;
5773
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005774 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005775 if (ret < 0)
5776 return ret;
5777
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005778#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005779 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5780 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005781 goto out;
5782
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005783 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005784 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005785 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5786 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005787 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005788 /*
5789 * AARGH! We are left with a different-
5790 * sized max buffer!
5791 * The max buffer is our "snapshot" buffer.
5792 * When a tracer needs a snapshot (one of the
5793 * latency tracers), it swaps the max buffer
5794 * with the saved snapshot. We succeeded in
5795 * updating the size of the main buffer, but failed to
5796 * update the size of the max buffer. But when we tried
5797 * to reset the main buffer to the original size, we
5798 * failed there too. This is very unlikely to
5799 * happen, but if it does, warn and kill all
5800 * tracing.
5801 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005802 WARN_ON(1);
5803 tracing_disabled = 1;
5804 }
5805 return ret;
5806 }
5807
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005808 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005809 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005810 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005811 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005812
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005813 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005814#endif /* CONFIG_TRACER_MAX_TRACE */
5815
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005816 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005817 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005818 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005819 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005820
5821 return ret;
5822}
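
/*
 * The max-buffer handling above is a two-step resize with a rollback:
 * grow the main buffer, try to grow the shadow copy, and on failure
 * shrink the main buffer back so the pair never diverges. A toy model
 * of that shape; the helpers and sizes below are made up and stand in
 * for the real ring-buffer API:
 */
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the two buffers: resizing just records a size. */
static long main_size = 1024, shadow_size = 1024;

static int resize_main(long size)
{
	main_size = size;
	return 0;
}

static int resize_shadow(long size)
{
	(void)size;
	return -1;		/* simulate an allocation failure */
}

static int resize_pair(long new_size, long old_size)
{
	if (resize_main(new_size) < 0)
		return -1;	/* nothing has changed yet */

	if (resize_shadow(new_size) < 0) {
		/* shrink the main buffer back so the pair stays in sync */
		if (resize_main(old_size) < 0) {
			fprintf(stderr, "buffers diverged\n"); /* the AARGH case */
			exit(1);
		}
		return -1;	/* rolled back cleanly */
	}
	return 0;		/* both buffers resized */
}

int main(void)
{
	if (resize_pair(4096, 1024) < 0)
		printf("failed, sizes intact: main=%ld shadow=%ld\n",
		       main_size, shadow_size);
	return 0;
}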
5823
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005824ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5825 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005826{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005827 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005828
5829 mutex_lock(&trace_types_lock);
5830
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005831 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5832 /* make sure, this cpu is enabled in the mask */
5833 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5834 ret = -EINVAL;
5835 goto out;
5836 }
5837 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005838
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005839 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005840 if (ret < 0)
5841 ret = -ENOMEM;
5842
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005843out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005844 mutex_unlock(&trace_types_lock);
5845
5846 return ret;
5847}
5848
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005849
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005850/**
5851 * tracing_update_buffers - used by tracing facility to expand ring buffers
5852 *
5853 * To save memory when tracing is never used on a system that has it
5854 * configured in, the ring buffers are set to a minimum size. Once a
5855 * user starts to use the tracing facility, they need to grow to
5856 * their default size.
5857 *
5858 * This function is to be called when a tracer is about to be used.
5859 */
5860int tracing_update_buffers(void)
5861{
5862 int ret = 0;
5863
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005864 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005865 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005866 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005867 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005868 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005869
5870 return ret;
5871}
5872
Steven Rostedt577b7852009-02-26 23:43:05 -05005873struct trace_option_dentry;
5874
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005875static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005876create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005877
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005878/*
5879 * Used to clear out the tracer before deletion of an instance.
5880 * Must have trace_types_lock held.
5881 */
5882static void tracing_set_nop(struct trace_array *tr)
5883{
5884 if (tr->current_trace == &nop_trace)
5885 return;
5886
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005887 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005888
5889 if (tr->current_trace->reset)
5890 tr->current_trace->reset(tr);
5891
5892 tr->current_trace = &nop_trace;
5893}
5894
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005895static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005896{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005897 /* Only enable if the directory has been created already. */
5898 if (!tr->dir)
5899 return;
5900
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005901 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005902}
5903
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005904int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005905{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005906 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005907#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005908 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005909#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005910 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005911
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005912 mutex_lock(&trace_types_lock);
5913
Steven Rostedt73c51622009-03-11 13:42:01 -04005914 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005915 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005916 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005917 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005918 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005919 ret = 0;
5920 }
5921
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005922 for (t = trace_types; t; t = t->next) {
5923 if (strcmp(t->name, buf) == 0)
5924 break;
5925 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005926 if (!t) {
5927 ret = -EINVAL;
5928 goto out;
5929 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005930 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005931 goto out;
5932
Tom Zanussia35873a2019-02-13 17:42:45 -06005933#ifdef CONFIG_TRACER_SNAPSHOT
5934 if (t->use_max_tr) {
5935 arch_spin_lock(&tr->max_lock);
5936 if (tr->cond_snapshot)
5937 ret = -EBUSY;
5938 arch_spin_unlock(&tr->max_lock);
5939 if (ret)
5940 goto out;
5941 }
5942#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005943	/* Some tracers won't work when set from the kernel command line */
5944 if (system_state < SYSTEM_RUNNING && t->noboot) {
5945 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5946 t->name);
5947 goto out;
5948 }
5949
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005950 /* Some tracers are only allowed for the top level buffer */
5951 if (!trace_ok_for_array(t, tr)) {
5952 ret = -EINVAL;
5953 goto out;
5954 }
5955
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005956 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04005957 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005958 ret = -EBUSY;
5959 goto out;
5960 }
5961
Steven Rostedt9f029e82008-11-12 15:24:24 -05005962 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005963
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005964 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005965
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005966 if (tr->current_trace->reset)
5967 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005968
Paul E. McKenney74401722018-11-06 18:44:52 -08005969 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005970 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005971
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005972#ifdef CONFIG_TRACER_MAX_TRACE
5973 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005974
5975 if (had_max_tr && !t->use_max_tr) {
5976 /*
5977 * We need to make sure that the update_max_tr sees that
5978 * current_trace changed to nop_trace to keep it from
5979 * swapping the buffers after we resize it.
5980	 * The update_max_tr is called with interrupts disabled,
5981	 * so a synchronize_rcu() is sufficient.
5982 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005983 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005984 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005985 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005986#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005987
5988#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005989 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005990 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005991 if (ret < 0)
5992 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005993 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005994#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005995
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005996 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005997 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005998 if (ret)
5999 goto out;
6000 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006001
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006002 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006003 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006004 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006005 out:
6006 mutex_unlock(&trace_types_lock);
6007
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006008 return ret;
6009}
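
/*
 * Userspace reaches tracing_set_tracer() through the "current_tracer"
 * tracefs file. A minimal usage sketch, assuming the usual tracefs
 * mount point (the tracer list depends on the kernel configuration):
 *
 *   # cat /sys/kernel/tracing/available_tracers
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo nop > /sys/kernel/tracing/current_tracer
 */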
6010
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006011static ssize_t
6012tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6013 size_t cnt, loff_t *ppos)
6014{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006015 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006016 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006017 int i;
6018 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006019 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006020
Steven Rostedt60063a62008-10-28 10:44:24 -04006021 ret = cnt;
6022
Li Zefanee6c2c12009-09-18 14:06:47 +08006023 if (cnt > MAX_TRACER_SIZE)
6024 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006025
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006026 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006027 return -EFAULT;
6028
6029 buf[cnt] = 0;
6030
6031 /* strip ending whitespace. */
6032 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6033 buf[i] = 0;
6034
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006035 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006036 if (err)
6037 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006038
Jiri Olsacf8517c2009-10-23 19:36:16 -04006039 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006040
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006041 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006042}
6043
6044static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006045tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6046 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006047{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006048 char buf[64];
6049 int r;
6050
Steven Rostedtcffae432008-05-12 21:21:00 +02006051 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006052 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006053 if (r > sizeof(buf))
6054 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006055 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006056}
6057
6058static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006059tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6060 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006061{
Hannes Eder5e398412009-02-10 19:44:34 +01006062 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006063 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006064
Peter Huewe22fe9b52011-06-07 21:58:27 +02006065 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6066 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006067 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006068
6069 *ptr = val * 1000;
6070
6071 return cnt;
6072}
6073
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006074static ssize_t
6075tracing_thresh_read(struct file *filp, char __user *ubuf,
6076 size_t cnt, loff_t *ppos)
6077{
6078 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6079}
6080
6081static ssize_t
6082tracing_thresh_write(struct file *filp, const char __user *ubuf,
6083 size_t cnt, loff_t *ppos)
6084{
6085 struct trace_array *tr = filp->private_data;
6086 int ret;
6087
6088 mutex_lock(&trace_types_lock);
6089 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6090 if (ret < 0)
6091 goto out;
6092
6093 if (tr->current_trace->update_thresh) {
6094 ret = tr->current_trace->update_thresh(tr);
6095 if (ret < 0)
6096 goto out;
6097 }
6098
6099 ret = cnt;
6100out:
6101 mutex_unlock(&trace_types_lock);
6102
6103 return ret;
6104}
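
/*
 * Sketch of the "tracing_thresh" interface: userspace reads and writes
 * microseconds (tracing_nsecs_write() above multiplies by 1000), and a
 * non-zero threshold makes the latency tracers record only traces that
 * exceed it. Assuming the usual tracefs mount point:
 *
 *   # echo 100 > /sys/kernel/tracing/tracing_thresh
 */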
6105
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006106#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006107
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006108static ssize_t
6109tracing_max_lat_read(struct file *filp, char __user *ubuf,
6110 size_t cnt, loff_t *ppos)
6111{
6112 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6113}
6114
6115static ssize_t
6116tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6117 size_t cnt, loff_t *ppos)
6118{
6119 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6120}
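
/*
 * These helpers back the "tracing_max_latency" file, which is only
 * built for the latency tracers and hwlat (see the #if above). As with
 * tracing_thresh, the unit seen by userspace is microseconds:
 *
 *   # cat /sys/kernel/tracing/tracing_max_latency
 */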
6121
Chen Gange428abb2015-11-10 05:15:15 +08006122#endif
6123
Steven Rostedtb3806b42008-05-12 21:20:46 +02006124static int tracing_open_pipe(struct inode *inode, struct file *filp)
6125{
Oleg Nesterov15544202013-07-23 17:25:57 +02006126 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006127 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006128 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006129
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006130 ret = tracing_check_open_get_tr(tr);
6131 if (ret)
6132 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006133
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006134 mutex_lock(&trace_types_lock);
6135
Steven Rostedtb3806b42008-05-12 21:20:46 +02006136 /* create a buffer to store the information to pass to userspace */
6137 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006138 if (!iter) {
6139 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006140 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006141 goto out;
6142 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006143
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006144 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006145 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006146
6147 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6148 ret = -ENOMEM;
6149 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306150 }
6151
Steven Rostedta3097202008-11-07 22:36:02 -05006152 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306153 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006154
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006155 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006156 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6157
David Sharp8be07092012-11-13 12:18:22 -08006158 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006159 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006160 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6161
Oleg Nesterov15544202013-07-23 17:25:57 +02006162 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006163 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006164 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006165 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006166 filp->private_data = iter;
6167
Steven Rostedt107bad82008-05-12 21:21:01 +02006168 if (iter->trace->pipe_open)
6169 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006170
Arnd Bergmannb4447862010-07-07 23:40:11 +02006171 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006172
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006173 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006174out:
6175 mutex_unlock(&trace_types_lock);
6176 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006177
6178fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006179 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006180 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006181 mutex_unlock(&trace_types_lock);
6182 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006183}
6184
6185static int tracing_release_pipe(struct inode *inode, struct file *file)
6186{
6187 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006188 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006189
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006190 mutex_lock(&trace_types_lock);
6191
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006192 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006193
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006194 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006195 iter->trace->pipe_close(iter);
6196
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006197 mutex_unlock(&trace_types_lock);
6198
Rusty Russell44623442009-01-01 10:12:23 +10306199 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006200 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006201 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006202
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006203 trace_array_put(tr);
6204
Steven Rostedtb3806b42008-05-12 21:20:46 +02006205 return 0;
6206}
6207
Al Viro9dd95742017-07-03 00:42:43 -04006208static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006209trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006210{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006211 struct trace_array *tr = iter->tr;
6212
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006213	/* Iterators are static; they should be filled or empty */
6214 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006215 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006216
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006217 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006218 /*
6219 * Always select as readable when in blocking mode
6220 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006221 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006222 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006223 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006224 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006225}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006226
Al Viro9dd95742017-07-03 00:42:43 -04006227static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006228tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6229{
6230 struct trace_iterator *iter = filp->private_data;
6231
6232 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006233}
6234
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006235/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006236static int tracing_wait_pipe(struct file *filp)
6237{
6238 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006239 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006240
6241 while (trace_empty(iter)) {
6242
6243 if ((filp->f_flags & O_NONBLOCK)) {
6244 return -EAGAIN;
6245 }
6246
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006247 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006248	 * We return EOF only after we have read something and tracing
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006249	 * has been disabled. We still block if tracing is disabled but we have never
6250 * read anything. This allows a user to cat this file, and
6251 * then enable tracing. But after we have read something,
6252 * we give an EOF when tracing is again disabled.
6253 *
6254 * iter->pos will be 0 if we haven't read anything.
6255 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006256 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006257 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006258
6259 mutex_unlock(&iter->mutex);
6260
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006261 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006262
6263 mutex_lock(&iter->mutex);
6264
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006265 if (ret)
6266 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006267 }
6268
6269 return 1;
6270}
6271
Steven Rostedtb3806b42008-05-12 21:20:46 +02006272/*
6273 * Consumer reader.
6274 */
6275static ssize_t
6276tracing_read_pipe(struct file *filp, char __user *ubuf,
6277 size_t cnt, loff_t *ppos)
6278{
6279 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006280 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006281
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006282 /*
6283	 * Avoid more than one consumer on a single file descriptor.
6284	 * This is just a matter of trace coherency; the ring buffer itself
6285	 * is protected.
6286 */
6287 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006288
6289 /* return any leftover data */
6290 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6291 if (sret != -EBUSY)
6292 goto out;
6293
6294 trace_seq_init(&iter->seq);
6295
Steven Rostedt107bad82008-05-12 21:21:01 +02006296 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006297 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6298 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006299 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006300 }
6301
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006302waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006303 sret = tracing_wait_pipe(filp);
6304 if (sret <= 0)
6305 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006306
6307 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006308 if (trace_empty(iter)) {
6309 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006310 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006311 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006312
6313 if (cnt >= PAGE_SIZE)
6314 cnt = PAGE_SIZE - 1;
6315
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006316 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006317 memset(&iter->seq, 0,
6318 sizeof(struct trace_iterator) -
6319 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006320 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006321 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006322 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006323
Lai Jiangshan4f535962009-05-18 19:35:34 +08006324 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006325 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006326 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006327 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006328 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006329
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006330 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006331 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006332 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006333 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006334 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006335 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006336 if (ret != TRACE_TYPE_NO_CONSUME)
6337 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006338
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006339 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006340 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006341
6342 /*
6343 * Setting the full flag means we reached the trace_seq buffer
6344	 * size and we should have left via the partial output condition above.
6345	 * If we get here, one of the trace_seq_* functions was used improperly.
6346 */
6347 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6348 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006349 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006350 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006351 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006352
Steven Rostedtb3806b42008-05-12 21:20:46 +02006353 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006354 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006355 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006356 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006357
6358 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006359 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006360 * entries, go back to wait for more entries.
6361 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006362 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006363 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006364
Steven Rostedt107bad82008-05-12 21:21:01 +02006365out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006366 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006367
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006368 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006369}
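
/*
 * "trace_pipe" is a consuming reader: entries returned by the function
 * above are removed from the ring buffer, and reads block until data is
 * available. A hedged usage sketch, assuming the usual tracefs mount:
 *
 *   # cat /sys/kernel/tracing/trace_pipe
 *
 * Unlike the "trace" file, concurrent readers compete for events, and
 * iter->mutex limits each file descriptor to a single consumer.
 */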
6370
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006371static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6372 unsigned int idx)
6373{
6374 __free_page(spd->pages[idx]);
6375}
6376
Steven Rostedt34cd4992009-02-09 12:06:29 -05006377static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006378tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006379{
6380 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006381 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006382 int ret;
6383
6384 /* Seq buffer is page-sized, exactly what we need. */
6385 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006386 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006387 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006388
6389 if (trace_seq_has_overflowed(&iter->seq)) {
6390 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006391 break;
6392 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006393
6394 /*
6395 * This should not be hit, because it should only
6396 * be set if the iter->seq overflowed. But check it
6397 * anyway to be safe.
6398 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006399 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006400 iter->seq.seq.len = save_len;
6401 break;
6402 }
6403
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006404 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006405 if (rem < count) {
6406 rem = 0;
6407 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006408 break;
6409 }
6410
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006411 if (ret != TRACE_TYPE_NO_CONSUME)
6412 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006413 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006414 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006415 rem = 0;
6416 iter->ent = NULL;
6417 break;
6418 }
6419 }
6420
6421 return rem;
6422}
6423
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006424static ssize_t tracing_splice_read_pipe(struct file *filp,
6425 loff_t *ppos,
6426 struct pipe_inode_info *pipe,
6427 size_t len,
6428 unsigned int flags)
6429{
Jens Axboe35f3d142010-05-20 10:43:18 +02006430 struct page *pages_def[PIPE_DEF_BUFFERS];
6431 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006432 struct trace_iterator *iter = filp->private_data;
6433 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006434 .pages = pages_def,
6435 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006436 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006437 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006438 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006439 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006440 };
6441 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006442 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006443 unsigned int i;
6444
Jens Axboe35f3d142010-05-20 10:43:18 +02006445 if (splice_grow_spd(pipe, &spd))
6446 return -ENOMEM;
6447
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006448 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006449
6450 if (iter->trace->splice_read) {
6451 ret = iter->trace->splice_read(iter, filp,
6452 ppos, pipe, len, flags);
6453 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006454 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006455 }
6456
6457 ret = tracing_wait_pipe(filp);
6458 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006459 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006460
Jason Wessel955b61e2010-08-05 09:22:23 -05006461 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006462 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006463 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006464 }
6465
Lai Jiangshan4f535962009-05-18 19:35:34 +08006466 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006467 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006468
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006469 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006470 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006471 spd.pages[i] = alloc_page(GFP_KERNEL);
6472 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006473 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006474
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006475 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006476
6477 /* Copy the data into the page, so we can start over. */
6478 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006479 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006480 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006481 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006482 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006483 break;
6484 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006485 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006486 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006487
Steven Rostedtf9520752009-03-02 14:04:40 -05006488 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006489 }
6490
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006491 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006492 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006493 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006494
6495 spd.nr_pages = i;
6496
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006497 if (i)
6498 ret = splice_to_pipe(pipe, &spd);
6499 else
6500 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006501out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006502 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006503 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006504
Steven Rostedt34cd4992009-02-09 12:06:29 -05006505out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006506 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006507 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006508}
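
/*
 * A hedged sketch of driving the splice path above from userspace;
 * tracing_fill_pipe_page() renders one page-sized trace_seq per pipe
 * page, so something like the following moves text without an extra
 * copy through a read() buffer:
 *
 *   splice(trace_pipe_fd, NULL, pipe_write_fd, NULL, 64 * 1024, 0);
 */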
6509
Steven Rostedta98a3c32008-05-12 21:20:59 +02006510static ssize_t
6511tracing_entries_read(struct file *filp, char __user *ubuf,
6512 size_t cnt, loff_t *ppos)
6513{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006514 struct inode *inode = file_inode(filp);
6515 struct trace_array *tr = inode->i_private;
6516 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006517 char buf[64];
6518 int r = 0;
6519 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006520
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006521 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006522
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006523 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006524 int cpu, buf_size_same;
6525 unsigned long size;
6526
6527 size = 0;
6528 buf_size_same = 1;
6529		/* check if all per-CPU buffer sizes are the same */
6530		for_each_tracing_cpu(cpu) {
6531			/* fill in the size from the first enabled CPU */
6532 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006533 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6534 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006535 buf_size_same = 0;
6536 break;
6537 }
6538 }
6539
6540 if (buf_size_same) {
6541 if (!ring_buffer_expanded)
6542 r = sprintf(buf, "%lu (expanded: %lu)\n",
6543 size >> 10,
6544 trace_buf_size >> 10);
6545 else
6546 r = sprintf(buf, "%lu\n", size >> 10);
6547 } else
6548 r = sprintf(buf, "X\n");
6549 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006550 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006551
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006552 mutex_unlock(&trace_types_lock);
6553
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006554 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6555 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006556}
6557
6558static ssize_t
6559tracing_entries_write(struct file *filp, const char __user *ubuf,
6560 size_t cnt, loff_t *ppos)
6561{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006562 struct inode *inode = file_inode(filp);
6563 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006564 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006565 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006566
Peter Huewe22fe9b52011-06-07 21:58:27 +02006567 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6568 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006569 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006570
6571 /* must have at least 1 entry */
6572 if (!val)
6573 return -EINVAL;
6574
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006575 /* value is in KB */
6576 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006577 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006578 if (ret < 0)
6579 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006580
Jiri Olsacf8517c2009-10-23 19:36:16 -04006581 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006582
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006583 return cnt;
6584}
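
/*
 * These handlers back "buffer_size_kb", both at the instance level and
 * under per_cpu/cpuN/. The value is kilobytes per CPU. Hedged example,
 * assuming the usual tracefs mount point:
 *
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *   # cat /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */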
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006585
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006586static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006587tracing_total_entries_read(struct file *filp, char __user *ubuf,
6588 size_t cnt, loff_t *ppos)
6589{
6590 struct trace_array *tr = filp->private_data;
6591 char buf[64];
6592 int r, cpu;
6593 unsigned long size = 0, expanded_size = 0;
6594
6595 mutex_lock(&trace_types_lock);
6596 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006597 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006598 if (!ring_buffer_expanded)
6599 expanded_size += trace_buf_size >> 10;
6600 }
6601 if (ring_buffer_expanded)
6602 r = sprintf(buf, "%lu\n", size);
6603 else
6604 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6605 mutex_unlock(&trace_types_lock);
6606
6607 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6608}
6609
6610static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006611tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6612 size_t cnt, loff_t *ppos)
6613{
6614 /*
6615	 * There is no need to read what the user has written; this function
6616	 * exists only so that using "echo" on this file does not return an error
6617 */
6618
6619 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006620
6621 return cnt;
6622}
6623
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006624static int
6625tracing_free_buffer_release(struct inode *inode, struct file *filp)
6626{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006627 struct trace_array *tr = inode->i_private;
6628
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006629	/* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006630 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006631 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006632 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006633 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006634
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006635 trace_array_put(tr);
6636
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006637 return 0;
6638}
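
/*
 * The "free_buffer" file: writes are accepted but ignored; it is the
 * final close that shrinks the ring buffer to its minimum size (and,
 * when the "disable_on_free" option is set, stops tracing first). So a
 * plain
 *
 *   # echo > /sys/kernel/tracing/free_buffer
 *
 * frees the buffer, as does the exit of any process holding it open.
 */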
6639
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006640static ssize_t
6641tracing_mark_write(struct file *filp, const char __user *ubuf,
6642 size_t cnt, loff_t *fpos)
6643{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006644 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006645 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006646 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006647 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006648 struct print_entry *entry;
6649 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006650 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006651 int size;
6652 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006653
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006654/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006655#define FAULTED_STR "<faulted>"
6656#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006657
Steven Rostedtc76f0692008-11-07 22:36:02 -05006658 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006659 return -EINVAL;
6660
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006661 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006662 return -EINVAL;
6663
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006664 if (cnt > TRACE_BUF_SIZE)
6665 cnt = TRACE_BUF_SIZE;
6666
Steven Rostedtd696b582011-09-22 11:50:27 -04006667 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006668
Steven Rostedtd696b582011-09-22 11:50:27 -04006669 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006670 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6671
6672	/* If the input is smaller than "<faulted>", make sure we can still add that */
6673 if (cnt < FAULTED_SIZE)
6674 size += FAULTED_SIZE - cnt;
6675
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006676 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006677 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6678 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006679 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006680 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006681 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006682
6683 entry = ring_buffer_event_data(event);
6684 entry->ip = _THIS_IP_;
6685
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006686 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6687 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006688 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006689 cnt = FAULTED_SIZE;
6690 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006691 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006692 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006693
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006694 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6695 /* do not add \n before testing triggers, but add \0 */
6696 entry->buf[cnt] = '\0';
6697 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6698 }
6699
Steven Rostedtd696b582011-09-22 11:50:27 -04006700 if (entry->buf[cnt - 1] != '\n') {
6701 entry->buf[cnt] = '\n';
6702 entry->buf[cnt + 1] = '\0';
6703 } else
6704 entry->buf[cnt] = '\0';
6705
Tingwei Zhang458999c2020-10-05 10:13:15 +03006706 if (static_branch_unlikely(&trace_marker_exports_enabled))
6707 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006708 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006709
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006710 if (tt)
6711 event_triggers_post_call(tr->trace_marker_file, tt);
6712
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006713 if (written > 0)
6714 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006715
Steven Rostedtfa32e852016-07-06 15:25:08 -04006716 return written;
6717}
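
/*
 * Writes to the "trace_marker" file land here and become TRACE_PRINT
 * events, letting userspace annotate a trace in flight. Hedged example,
 * assuming the usual tracefs mount point:
 *
 *   # echo "about to trigger the bug" > /sys/kernel/tracing/trace_marker
 */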
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006718
Steven Rostedtfa32e852016-07-06 15:25:08 -04006719/* Limit it for now to 3K (including tag) */
6720#define RAW_DATA_MAX_SIZE (1024*3)
6721
6722static ssize_t
6723tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6724 size_t cnt, loff_t *fpos)
6725{
6726 struct trace_array *tr = filp->private_data;
6727 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006728 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006729 struct raw_data_entry *entry;
6730 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006731 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006732 int size;
6733 int len;
6734
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006735#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6736
Steven Rostedtfa32e852016-07-06 15:25:08 -04006737 if (tracing_disabled)
6738 return -EINVAL;
6739
6740 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6741 return -EINVAL;
6742
6743 /* The marker must at least have a tag id */
6744 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6745 return -EINVAL;
6746
6747 if (cnt > TRACE_BUF_SIZE)
6748 cnt = TRACE_BUF_SIZE;
6749
6750 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6751
Steven Rostedtfa32e852016-07-06 15:25:08 -04006752 local_save_flags(irq_flags);
6753 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006754 if (cnt < FAULT_SIZE_ID)
6755 size += FAULT_SIZE_ID - cnt;
6756
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006757 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006758 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6759 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006760 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006761 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006762 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006763
6764 entry = ring_buffer_event_data(event);
6765
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006766 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6767 if (len) {
6768 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006769 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006770 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006771 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006772 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006773
6774 __buffer_unlock_commit(buffer, event);
6775
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006776 if (written > 0)
6777 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006778
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006779 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006780}
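
/*
 * "trace_marker_raw" is the binary cousin of trace_marker: the payload
 * must begin with a four-byte tag id (entry->id above) and the rest is
 * opaque binary data. A hedged C-side sketch, with fd open on
 * trace_marker_raw:
 *
 *   struct { unsigned int id; char data[8]; } raw = { 42, "example" };
 *   write(fd, &raw, sizeof(raw));
 */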
6781
Li Zefan13f16d22009-12-08 11:16:11 +08006782static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006783{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006784 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006785 int i;
6786
6787 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006788 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006789 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006790 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6791 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006792 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006793
Li Zefan13f16d22009-12-08 11:16:11 +08006794 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006795}
6796
Tom Zanussid71bd342018-01-15 20:52:07 -06006797int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006798{
Zhaolei5079f322009-08-25 16:12:56 +08006799 int i;
6800
Zhaolei5079f322009-08-25 16:12:56 +08006801 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6802 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6803 break;
6804 }
6805 if (i == ARRAY_SIZE(trace_clocks))
6806 return -EINVAL;
6807
Zhaolei5079f322009-08-25 16:12:56 +08006808 mutex_lock(&trace_types_lock);
6809
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006810 tr->clock_id = i;
6811
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006812 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006813
David Sharp60303ed2012-10-11 16:27:52 -07006814 /*
6815 * New clock may not be consistent with the previous clock.
6816 * Reset the buffer so that it doesn't have incomparable timestamps.
6817 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006818 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006819
6820#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006821 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006822 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006823 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006824#endif
David Sharp60303ed2012-10-11 16:27:52 -07006825
Zhaolei5079f322009-08-25 16:12:56 +08006826 mutex_unlock(&trace_types_lock);
6827
Steven Rostedte1e232c2014-02-10 23:38:46 -05006828 return 0;
6829}
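
/*
 * The clock is chosen through the "trace_clock" file; reading it lists
 * the available clocks with the current one in brackets (the exact list
 * varies by architecture and kernel version). Hedged example:
 *
 *   # cat /sys/kernel/tracing/trace_clock
 *   [local] global counter uptime perf mono mono_raw boot
 *   # echo mono > /sys/kernel/tracing/trace_clock
 */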
6830
6831static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6832 size_t cnt, loff_t *fpos)
6833{
6834 struct seq_file *m = filp->private_data;
6835 struct trace_array *tr = m->private;
6836 char buf[64];
6837 const char *clockstr;
6838 int ret;
6839
6840 if (cnt >= sizeof(buf))
6841 return -EINVAL;
6842
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006843 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006844 return -EFAULT;
6845
6846 buf[cnt] = 0;
6847
6848 clockstr = strstrip(buf);
6849
6850 ret = tracing_set_clock(tr, clockstr);
6851 if (ret)
6852 return ret;
6853
Zhaolei5079f322009-08-25 16:12:56 +08006854 *fpos += cnt;
6855
6856 return cnt;
6857}
6858
Li Zefan13f16d22009-12-08 11:16:11 +08006859static int tracing_clock_open(struct inode *inode, struct file *file)
6860{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006861 struct trace_array *tr = inode->i_private;
6862 int ret;
6863
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006864 ret = tracing_check_open_get_tr(tr);
6865 if (ret)
6866 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006867
6868 ret = single_open(file, tracing_clock_show, inode->i_private);
6869 if (ret < 0)
6870 trace_array_put(tr);
6871
6872 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006873}
6874
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006875static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6876{
6877 struct trace_array *tr = m->private;
6878
6879 mutex_lock(&trace_types_lock);
6880
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006881 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006882 seq_puts(m, "delta [absolute]\n");
6883 else
6884 seq_puts(m, "[delta] absolute\n");
6885
6886 mutex_unlock(&trace_types_lock);
6887
6888 return 0;
6889}
6890
6891static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6892{
6893 struct trace_array *tr = inode->i_private;
6894 int ret;
6895
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006896 ret = tracing_check_open_get_tr(tr);
6897 if (ret)
6898 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006899
6900 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6901 if (ret < 0)
6902 trace_array_put(tr);
6903
6904 return ret;
6905}
6906
Tom Zanussi00b41452018-01-15 20:51:39 -06006907int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6908{
6909 int ret = 0;
6910
6911 mutex_lock(&trace_types_lock);
6912
6913 if (abs && tr->time_stamp_abs_ref++)
6914 goto out;
6915
6916 if (!abs) {
6917 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6918 ret = -EINVAL;
6919 goto out;
6920 }
6921
6922 if (--tr->time_stamp_abs_ref)
6923 goto out;
6924 }
6925
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006926 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
Tom Zanussi00b41452018-01-15 20:51:39 -06006927
6928#ifdef CONFIG_TRACER_MAX_TRACE
6929 if (tr->max_buffer.buffer)
6930 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6931#endif
6932 out:
6933 mutex_unlock(&trace_types_lock);
6934
6935 return ret;
6936}
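
/*
 * "timestamp_mode": in "delta" mode each event stores an offset from
 * its ring buffer page's timestamp; "absolute" stores full timestamps,
 * which hist triggers need for cross-page calculations. The show()
 * above prints the active mode in brackets, e.g. "[delta] absolute".
 * tracing_set_time_stamp_abs() is refcounted so several users can
 * request absolute mode independently.
 */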
6937
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006938struct ftrace_buffer_info {
6939 struct trace_iterator iter;
6940 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006941 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006942 unsigned int read;
6943};
6944
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006945#ifdef CONFIG_TRACER_SNAPSHOT
6946static int tracing_snapshot_open(struct inode *inode, struct file *file)
6947{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006948 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006949 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006950 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006951 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006952
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006953 ret = tracing_check_open_get_tr(tr);
6954 if (ret)
6955 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006956
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006957 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006958 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006959 if (IS_ERR(iter))
6960 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006961 } else {
6962 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006963 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006964 m = kzalloc(sizeof(*m), GFP_KERNEL);
6965 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006966 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006967 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6968 if (!iter) {
6969 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006970 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006971 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006972 ret = 0;
6973
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006974 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006975 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006976 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006977 m->private = iter;
6978 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006979 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006980out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006981 if (ret < 0)
6982 trace_array_put(tr);
6983
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006984 return ret;
6985}
6986
6987static ssize_t
6988tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6989 loff_t *ppos)
6990{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006991 struct seq_file *m = filp->private_data;
6992 struct trace_iterator *iter = m->private;
6993 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006994 unsigned long val;
6995 int ret;
6996
6997 ret = tracing_update_buffers();
6998 if (ret < 0)
6999 return ret;
7000
7001 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7002 if (ret)
7003 return ret;
7004
7005 mutex_lock(&trace_types_lock);
7006
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007007 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007008 ret = -EBUSY;
7009 goto out;
7010 }
7011
Tom Zanussia35873a2019-02-13 17:42:45 -06007012 arch_spin_lock(&tr->max_lock);
7013 if (tr->cond_snapshot)
7014 ret = -EBUSY;
7015 arch_spin_unlock(&tr->max_lock);
7016 if (ret)
7017 goto out;
7018
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007019 switch (val) {
7020 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007021 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7022 ret = -EINVAL;
7023 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007024 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007025 if (tr->allocated_snapshot)
7026 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007027 break;
7028 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007029/* Only allow per-cpu swap if the ring buffer supports it */
7030#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7031 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7032 ret = -EINVAL;
7033 break;
7034 }
7035#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007036 if (tr->allocated_snapshot)
7037 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007038 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007039 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007040 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007041 if (ret < 0)
7042 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007043 local_irq_disable();
7044 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007045 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007046 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007047 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007048 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007049 local_irq_enable();
7050 break;
7051 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007052 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007053 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7054 tracing_reset_online_cpus(&tr->max_buffer);
7055 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007056 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007057 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007058 break;
7059 }
7060
7061 if (ret >= 0) {
7062 *ppos += cnt;
7063 ret = cnt;
7064 }
7065out:
7066 mutex_unlock(&trace_types_lock);
7067 return ret;
7068}
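
/*
 * A hedged summary of the values the "snapshot" file accepts, matching
 * the switch above:
 *
 *   # echo 0 > snapshot    frees the snapshot buffer
 *   # echo 1 > snapshot    allocates it if needed and takes a snapshot
 *   # echo 2 > snapshot    clears the snapshot contents without freeing
 *                          (any other value behaves like 2)
 */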
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007069
7070static int tracing_snapshot_release(struct inode *inode, struct file *file)
7071{
7072 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007073 int ret;
7074
7075 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007076
7077 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007078 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007079
7080 /* If write only, the seq_file is just a stub */
7081 if (m)
7082 kfree(m->private);
7083 kfree(m);
7084
7085 return 0;
7086}
7087
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007088static int tracing_buffers_open(struct inode *inode, struct file *filp);
7089static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7090 size_t count, loff_t *ppos);
7091static int tracing_buffers_release(struct inode *inode, struct file *file);
7092static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7093 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7094
7095static int snapshot_raw_open(struct inode *inode, struct file *filp)
7096{
7097 struct ftrace_buffer_info *info;
7098 int ret;
7099
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007100 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007101 ret = tracing_buffers_open(inode, filp);
7102 if (ret < 0)
7103 return ret;
7104
7105 info = filp->private_data;
7106
7107 if (info->iter.trace->use_max_tr) {
7108 tracing_buffers_release(inode, filp);
7109 return -EBUSY;
7110 }
7111
7112 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007113 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007114
7115 return ret;
7116}
7117
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007118#endif /* CONFIG_TRACER_SNAPSHOT */
7119
7120
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007121static const struct file_operations tracing_thresh_fops = {
7122 .open = tracing_open_generic,
7123 .read = tracing_thresh_read,
7124 .write = tracing_thresh_write,
7125 .llseek = generic_file_llseek,
7126};
7127
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007128#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007129static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007130 .open = tracing_open_generic,
7131 .read = tracing_max_lat_read,
7132 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007133 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007134};
Chen Gange428abb2015-11-10 05:15:15 +08007135#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007136
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007137static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007138 .open = tracing_open_generic,
7139 .read = tracing_set_trace_read,
7140 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007141 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007142};
7143
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007144static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007145 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007146 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007147 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007148 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007149 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007150 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007151};
7152
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007153static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007154 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007155 .read = tracing_entries_read,
7156 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007157 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007158 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007159};
7160
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007161static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007162 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007163 .read = tracing_total_entries_read,
7164 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007165 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007166};
7167
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007168static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007169 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007170 .write = tracing_free_buffer_write,
7171 .release = tracing_free_buffer_release,
7172};
7173
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007174static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007175 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007176 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007177 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007178 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007179};
7180
Steven Rostedtfa32e852016-07-06 15:25:08 -04007181static const struct file_operations tracing_mark_raw_fops = {
7182 .open = tracing_open_generic_tr,
7183 .write = tracing_mark_raw_write,
7184 .llseek = generic_file_llseek,
7185 .release = tracing_release_generic_tr,
7186};
7187
Zhaolei5079f322009-08-25 16:12:56 +08007188static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007189 .open = tracing_clock_open,
7190 .read = seq_read,
7191 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007192 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007193 .write = tracing_clock_write,
7194};
7195
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007196static const struct file_operations trace_time_stamp_mode_fops = {
7197 .open = tracing_time_stamp_mode_open,
7198 .read = seq_read,
7199 .llseek = seq_lseek,
7200 .release = tracing_single_release_tr,
7201};
7202
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007203#ifdef CONFIG_TRACER_SNAPSHOT
7204static const struct file_operations snapshot_fops = {
7205 .open = tracing_snapshot_open,
7206 .read = seq_read,
7207 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007208 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007209 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007210};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007211
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007212static const struct file_operations snapshot_raw_fops = {
7213 .open = snapshot_raw_open,
7214 .read = tracing_buffers_read,
7215 .release = tracing_buffers_release,
7216 .splice_read = tracing_buffers_splice_read,
7217 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007218};
7219
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007220#endif /* CONFIG_TRACER_SNAPSHOT */
7221
Tom Zanussi8a062902019-03-31 18:48:15 -05007222#define TRACING_LOG_ERRS_MAX 8
7223#define TRACING_LOG_LOC_MAX 128
7224
7225#define CMD_PREFIX " Command: "
7226
7227struct err_info {
7228 const char **errs; /* ptr to loc-specific array of err strings */
7229 u8 type; /* index into errs -> specific err string */
7230	u8		pos;	/* caret offset into cmd (cmd is capped at MAX_FILTER_STR_VAL = 256) */
7231 u64 ts;
7232};
7233
7234struct tracing_log_err {
7235 struct list_head list;
7236 struct err_info info;
7237 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7238 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7239};
7240
Tom Zanussi8a062902019-03-31 18:48:15 -05007241static DEFINE_MUTEX(tracing_err_log_lock);
7242
YueHaibingff585c52019-06-14 23:32:10 +08007243static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007244{
7245 struct tracing_log_err *err;
7246
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007247 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007248 err = kzalloc(sizeof(*err), GFP_KERNEL);
7249 if (!err)
7250 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007251 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007252
7253 return err;
7254 }
7255
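	/* Log is full: recycle the oldest entry from the head of the list. */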
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007256 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007257 list_del(&err->list);
7258
7259 return err;
7260}
7261
7262/**
7263 * err_pos - find the position of a string within a command for error careting
7264 * @cmd: The tracing command that caused the error
7265 * @str: The string to position the caret at within @cmd
7266 *
7267 * Finds the position of the first occurrence of @str within @cmd. The
7268 * return value can be passed to tracing_log_err() for caret placement
7269 * within @cmd.
7270 *
7271 * Returns the index within @cmd of the first occurrence of @str or 0
7272 * if @str was not found.
7273 */
7274unsigned int err_pos(char *cmd, const char *str)
7275{
7276 char *found;
7277
7278 if (WARN_ON(!strlen(cmd)))
7279 return 0;
7280
7281 found = strstr(cmd, str);
7282 if (found)
7283 return found - cmd;
7284
7285 return 0;
7286}
7287
7288/**
7289 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007290 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007291 * @loc: A string describing where the error occurred
7292 * @cmd: The tracing command that caused the error
7293 * @errs: The array of loc-specific static error strings
7294 * @type: The index into errs[], which produces the specific static err string
7295 * @pos: The position the caret should be placed in the cmd
7296 *
7297 * Writes an error into tracing/error_log of the form:
7298 *
7299 * <loc>: error: <text>
7300 * Command: <cmd>
7301 * ^
7302 *
7303 * tracing/error_log is a small log file containing the last
7304 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7305 * unless there has been a tracing error, and the error log can be
7306 * cleared and have its memory freed by writing the empty string in
7307 * truncation mode to it, i.e. echo > tracing/error_log.
7308 *
7309 * NOTE: the @errs array along with the @type param are used to
7310 * produce a static error string - this string is not copied and saved
7311 * when the error is logged - only a pointer to it is saved. See
7312 * existing callers for examples of how static strings are typically
7313 * defined for use with tracing_log_err().
7314 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007315void tracing_log_err(struct trace_array *tr,
7316 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007317 const char **errs, u8 type, u8 pos)
7318{
7319 struct tracing_log_err *err;
7320
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007321 if (!tr)
7322 tr = &global_trace;
7323
Tom Zanussi8a062902019-03-31 18:48:15 -05007324 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007325 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007326 if (PTR_ERR(err) == -ENOMEM) {
7327 mutex_unlock(&tracing_err_log_lock);
7328 return;
7329 }
7330
7331 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7332	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7333
7334 err->info.errs = errs;
7335 err->info.type = type;
7336 err->info.pos = pos;
7337 err->info.ts = local_clock();
7338
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007339 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007340 mutex_unlock(&tracing_err_log_lock);
7341}
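
/*
 * Illustrative sketch only (not a real caller): a hypothetical command
 * parser could pair err_pos() with tracing_log_err() roughly as below;
 * "myparser", myparser_errs[] and MYPARSER_ERR_FIELD are made-up names.
 *
 *	static const char *myparser_errs[] = {
 *		"unknown field",		<- index MYPARSER_ERR_FIELD
 *	};
 *
 *	if (bad_field)
 *		tracing_log_err(tr, "myparser", cmd, myparser_errs,
 *				MYPARSER_ERR_FIELD, err_pos(cmd, field_str));
 *
 * which logs the static error text and carets field_str within @cmd.
 */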
7342
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007343static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007344{
7345 struct tracing_log_err *err, *next;
7346
7347 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007348 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007349 list_del(&err->list);
7350 kfree(err);
7351 }
7352
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007353 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007354 mutex_unlock(&tracing_err_log_lock);
7355}
7356
7357static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7358{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007359 struct trace_array *tr = m->private;
7360
Tom Zanussi8a062902019-03-31 18:48:15 -05007361 mutex_lock(&tracing_err_log_lock);
7362
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007363 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007364}
7365
7366static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7367{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007368 struct trace_array *tr = m->private;
7369
7370 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007371}
7372
7373static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7374{
7375 mutex_unlock(&tracing_err_log_lock);
7376}
7377
7378static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7379{
7380 u8 i;
7381
7382 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7383 seq_putc(m, ' ');
7384 for (i = 0; i < pos; i++)
7385 seq_putc(m, ' ');
7386 seq_puts(m, "^\n");
7387}
7388
7389static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7390{
7391 struct tracing_log_err *err = v;
7392
7393 if (err) {
7394 const char *err_text = err->info.errs[err->info.type];
7395 u64 sec = err->info.ts;
7396 u32 nsec;
7397
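		/* do_div() reduces sec to whole seconds and returns the ns remainder. */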
7398 nsec = do_div(sec, NSEC_PER_SEC);
7399 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7400 err->loc, err_text);
7401 seq_printf(m, "%s", err->cmd);
7402 tracing_err_log_show_pos(m, err->info.pos);
7403 }
7404
7405 return 0;
7406}
7407
7408static const struct seq_operations tracing_err_log_seq_ops = {
7409 .start = tracing_err_log_seq_start,
7410 .next = tracing_err_log_seq_next,
7411 .stop = tracing_err_log_seq_stop,
7412 .show = tracing_err_log_seq_show
7413};
7414
7415static int tracing_err_log_open(struct inode *inode, struct file *file)
7416{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007417 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007418 int ret = 0;
7419
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007420 ret = tracing_check_open_get_tr(tr);
7421 if (ret)
7422 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007423
Tom Zanussi8a062902019-03-31 18:48:15 -05007424 /* If this file was opened for write, then erase contents */
7425 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007426 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007427
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007428 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007429 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007430 if (!ret) {
7431 struct seq_file *m = file->private_data;
7432 m->private = tr;
7433 } else {
7434 trace_array_put(tr);
7435 }
7436 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007437 return ret;
7438}
7439
7440static ssize_t tracing_err_log_write(struct file *file,
7441 const char __user *buffer,
7442 size_t count, loff_t *ppos)
7443{
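	/* Writes are accepted but ignored; opening with O_TRUNC is what clears the log. */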
7444 return count;
7445}
7446
Takeshi Misawad122ed62019-06-28 19:56:40 +09007447static int tracing_err_log_release(struct inode *inode, struct file *file)
7448{
7449 struct trace_array *tr = inode->i_private;
7450
7451 trace_array_put(tr);
7452
7453 if (file->f_mode & FMODE_READ)
7454 seq_release(inode, file);
7455
7456 return 0;
7457}
7458
Tom Zanussi8a062902019-03-31 18:48:15 -05007459static const struct file_operations tracing_err_log_fops = {
7460 .open = tracing_err_log_open,
7461 .write = tracing_err_log_write,
7462 .read = seq_read,
7463 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007464 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007465};
7466
Steven Rostedt2cadf912008-12-01 22:20:19 -05007467static int tracing_buffers_open(struct inode *inode, struct file *filp)
7468{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007469 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007470 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007471 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007472
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007473 ret = tracing_check_open_get_tr(tr);
7474 if (ret)
7475 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007476
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007477 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007478 if (!info) {
7479 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007480 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007481 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007482
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007483 mutex_lock(&trace_types_lock);
7484
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007485 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007486 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007487 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007488 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007489 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007490 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007491 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007492
7493 filp->private_data = info;
7494
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007495 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007496
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007497 mutex_unlock(&trace_types_lock);
7498
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007499 ret = nonseekable_open(inode, filp);
7500 if (ret < 0)
7501 trace_array_put(tr);
7502
7503 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007504}
7505
Al Viro9dd95742017-07-03 00:42:43 -04007506static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007507tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7508{
7509 struct ftrace_buffer_info *info = filp->private_data;
7510 struct trace_iterator *iter = &info->iter;
7511
7512 return trace_poll(iter, filp, poll_table);
7513}
7514
Steven Rostedt2cadf912008-12-01 22:20:19 -05007515static ssize_t
7516tracing_buffers_read(struct file *filp, char __user *ubuf,
7517 size_t count, loff_t *ppos)
7518{
7519 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007520 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007521 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007522 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007523
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007524 if (!count)
7525 return 0;
7526
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007527#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007528 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7529 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007530#endif
7531
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007532 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007533 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007534 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007535 if (IS_ERR(info->spare)) {
7536 ret = PTR_ERR(info->spare);
7537 info->spare = NULL;
7538 } else {
7539 info->spare_cpu = iter->cpu_file;
7540 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007541 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007542 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007543 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007544
Steven Rostedt2cadf912008-12-01 22:20:19 -05007545 /* Do we have previous read data to read? */
7546 if (info->read < PAGE_SIZE)
7547 goto read;
7548
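	/* Pull a fresh page from the ring buffer, blocking and retrying if it is empty (unless O_NONBLOCK). */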
Steven Rostedtb6273442013-02-28 13:44:11 -05007549 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007550 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007551 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007552 &info->spare,
7553 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007554 iter->cpu_file, 0);
7555 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007556
7557 if (ret < 0) {
7558 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007559 if ((filp->f_flags & O_NONBLOCK))
7560 return -EAGAIN;
7561
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007562 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007563 if (ret)
7564 return ret;
7565
Steven Rostedtb6273442013-02-28 13:44:11 -05007566 goto again;
7567 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007568 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007569 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007570
Steven Rostedt436fc282011-10-14 10:44:25 -04007571 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007572 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007573 size = PAGE_SIZE - info->read;
7574 if (size > count)
7575 size = count;
7576
7577 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007578 if (ret == size)
7579 return -EFAULT;
7580
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007581 size -= ret;
7582
Steven Rostedt2cadf912008-12-01 22:20:19 -05007583 *ppos += size;
7584 info->read += size;
7585
7586 return size;
7587}
7588
7589static int tracing_buffers_release(struct inode *inode, struct file *file)
7590{
7591 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007592 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007593
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007594 mutex_lock(&trace_types_lock);
7595
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007596 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007597
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007598 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007599
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007600 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007601 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007602 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007603 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007604
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007605 mutex_unlock(&trace_types_lock);
7606
Steven Rostedt2cadf912008-12-01 22:20:19 -05007607 return 0;
7608}
7609
7610struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007611 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007612 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007613 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007614 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007615};
7616
Jann Hornb9872222019-04-04 23:59:25 +02007617static void buffer_ref_release(struct buffer_ref *ref)
7618{
7619 if (!refcount_dec_and_test(&ref->refcount))
7620 return;
7621 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7622 kfree(ref);
7623}
7624
Steven Rostedt2cadf912008-12-01 22:20:19 -05007625static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7626 struct pipe_buffer *buf)
7627{
7628 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7629
Jann Hornb9872222019-04-04 23:59:25 +02007630 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007631 buf->private = 0;
7632}
7633
Matthew Wilcox15fab632019-04-05 14:02:10 -07007634static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007635 struct pipe_buffer *buf)
7636{
7637 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7638
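	/* Refuse new references once the count gets near overflow. */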
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007639 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007640 return false;
7641
Jann Hornb9872222019-04-04 23:59:25 +02007642 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007643 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007644}
7645
7646/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007647static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007648 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007649 .get = buffer_pipe_buf_get,
7650};
7651
7652/*
7653 * Callback from splice_to_pipe(); releases the pages left in the spd
7654 * if we errored out while filling the pipe.
7655 */
7656static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7657{
7658 struct buffer_ref *ref =
7659 (struct buffer_ref *)spd->partial[i].private;
7660
Jann Hornb9872222019-04-04 23:59:25 +02007661 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007662 spd->partial[i].private = 0;
7663}
7664
7665static ssize_t
7666tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7667 struct pipe_inode_info *pipe, size_t len,
7668 unsigned int flags)
7669{
7670 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007671 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007672 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7673 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007674 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007675 .pages = pages_def,
7676 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007677 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007678 .ops = &buffer_pipe_buf_ops,
7679 .spd_release = buffer_spd_release,
7680 };
7681 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007682 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007683 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007684
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007685#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007686 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7687 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007688#endif
7689
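	/* Splice works in whole pages: the offset must be page aligned and len is rounded down to a page multiple. */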
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007690 if (*ppos & (PAGE_SIZE - 1))
7691 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007692
7693 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007694 if (len < PAGE_SIZE)
7695 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007696 len &= PAGE_MASK;
7697 }
7698
Al Viro1ae22932016-09-17 18:31:46 -04007699 if (splice_grow_spd(pipe, &spd))
7700 return -ENOMEM;
7701
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007702 again:
7703 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007704 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007705
Al Viroa786c062014-04-11 12:01:03 -04007706 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007707 struct page *page;
7708 int r;
7709
7710 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007711 if (!ref) {
7712 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007713 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007714 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007715
Jann Hornb9872222019-04-04 23:59:25 +02007716 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007717 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007718 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007719 if (IS_ERR(ref->page)) {
7720 ret = PTR_ERR(ref->page);
7721 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007722 kfree(ref);
7723 break;
7724 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007725 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007726
7727 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007728 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007729 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007730 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7731 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007732 kfree(ref);
7733 break;
7734 }
7735
Steven Rostedt2cadf912008-12-01 22:20:19 -05007736 page = virt_to_page(ref->page);
7737
7738 spd.pages[i] = page;
7739 spd.partial[i].len = PAGE_SIZE;
7740 spd.partial[i].offset = 0;
7741 spd.partial[i].private = (unsigned long)ref;
7742 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007743 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007744
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007745 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007746 }
7747
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007748 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007749 spd.nr_pages = i;
7750
7751 /* did we read anything? */
7752 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007753 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007754 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007755
Al Viro1ae22932016-09-17 18:31:46 -04007756 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007757 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007758 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007759
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007760 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007761 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007762 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007763
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007764 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007765 }
7766
7767 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007768out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007769 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007770
Steven Rostedt2cadf912008-12-01 22:20:19 -05007771 return ret;
7772}
7773
7774static const struct file_operations tracing_buffers_fops = {
7775 .open = tracing_buffers_open,
7776 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007777 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007778 .release = tracing_buffers_release,
7779 .splice_read = tracing_buffers_splice_read,
7780 .llseek = no_llseek,
7781};
7782
Steven Rostedtc8d77182009-04-29 18:03:45 -04007783static ssize_t
7784tracing_stats_read(struct file *filp, char __user *ubuf,
7785 size_t count, loff_t *ppos)
7786{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007787 struct inode *inode = file_inode(filp);
7788 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007789 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007790 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007791 struct trace_seq *s;
7792 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007793 unsigned long long t;
7794 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007795
Li Zefane4f2d102009-06-15 10:57:28 +08007796 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007797 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007798 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007799
7800 trace_seq_init(s);
7801
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007802 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007803 trace_seq_printf(s, "entries: %ld\n", cnt);
7804
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007805 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007806 trace_seq_printf(s, "overrun: %ld\n", cnt);
7807
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007808 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007809 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7810
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007811 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007812 trace_seq_printf(s, "bytes: %ld\n", cnt);
7813
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007814 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007815 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007816 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007817 usec_rem = do_div(t, USEC_PER_SEC);
7818 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7819 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007820
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007821 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007822 usec_rem = do_div(t, USEC_PER_SEC);
7823 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7824 } else {
7825 /* counter or tsc mode for trace_clock */
7826 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007827 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007828
7829 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007830 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007831 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007832
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007833 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007834 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7835
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007836 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007837 trace_seq_printf(s, "read events: %ld\n", cnt);
7838
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007839 count = simple_read_from_buffer(ubuf, count, ppos,
7840 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007841
7842 kfree(s);
7843
7844 return count;
7845}
7846
7847static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007848 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007849 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007850 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007851 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007852};
7853
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007854#ifdef CONFIG_DYNAMIC_FTRACE
7855
7856static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007857tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007858 size_t cnt, loff_t *ppos)
7859{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007860 ssize_t ret;
7861 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007862 int r;
7863
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007864 /* 256 should be plenty to hold the amount needed */
7865 buf = kmalloc(256, GFP_KERNEL);
7866 if (!buf)
7867 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007868
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007869 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7870 ftrace_update_tot_cnt,
7871 ftrace_number_of_pages,
7872 ftrace_number_of_groups);
7873
7874 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7875 kfree(buf);
7876 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007877}
7878
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007879static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007880 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007881 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007882 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007883};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007884#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007885
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007886#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7887static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007888ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007889 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007890 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007891{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007892 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007893}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007894
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007895static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007896ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007897 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007898 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007899{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007900 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007901 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007902
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007903 if (mapper)
7904 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007905
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007906 if (count) {
7907
7908 if (*count <= 0)
7909 return;
7910
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007911 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007912 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007913
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007914 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007915}
7916
7917static int
7918ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7919 struct ftrace_probe_ops *ops, void *data)
7920{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007921 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007922 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007923
7924 seq_printf(m, "%ps:", (void *)ip);
7925
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007926 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007927
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007928 if (mapper)
7929 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7930
7931 if (count)
7932 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007933 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007934 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007935
7936 return 0;
7937}
7938
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007939static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007940ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007941 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007942{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007943 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007944
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007945 if (!mapper) {
7946 mapper = allocate_ftrace_func_mapper();
7947 if (!mapper)
7948 return -ENOMEM;
7949 *data = mapper;
7950 }
7951
7952 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007953}
7954
7955static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007956ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007957 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007958{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007959 struct ftrace_func_mapper *mapper = data;
7960
7961 if (!ip) {
7962 if (!mapper)
7963 return;
7964 free_ftrace_func_mapper(mapper, NULL);
7965 return;
7966 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007967
7968 ftrace_func_mapper_remove_ip(mapper, ip);
7969}
7970
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007971static struct ftrace_probe_ops snapshot_probe_ops = {
7972 .func = ftrace_snapshot,
7973 .print = ftrace_snapshot_print,
7974};
7975
7976static struct ftrace_probe_ops snapshot_count_probe_ops = {
7977 .func = ftrace_count_snapshot,
7978 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007979 .init = ftrace_snapshot_init,
7980 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007981};
7982
7983static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007984ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007985 char *glob, char *cmd, char *param, int enable)
7986{
7987 struct ftrace_probe_ops *ops;
7988 void *count = (void *)-1;
7989 char *number;
7990 int ret;
7991
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007992 if (!tr)
7993 return -ENODEV;
7994
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007995 /* hash funcs only work with set_ftrace_filter */
7996 if (!enable)
7997 return -EINVAL;
7998
7999 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8000
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04008001 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04008002 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008003
8004 if (!param)
8005 goto out_reg;
8006
8007 number = strsep(&param, ":");
8008
8009 if (!strlen(number))
8010 goto out_reg;
8011
8012 /*
8013 * We use the callback data field (which is a pointer)
8014 * as our counter.
8015 */
8016 ret = kstrtoul(number, 0, (unsigned long *)&count);
8017 if (ret)
8018 return ret;
8019
8020 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008021 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008022 if (ret < 0)
8023 goto out;
8024
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008025 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008026
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008027 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008028 return ret < 0 ? ret : 0;
8029}
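
/*
 * Typical usage from user space, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:3' > set_ftrace_filter
 *	echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The first snapshots on every hit of schedule(), the second only on the
 * first three hits, and the '!' form removes the probe again.
 */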
8030
8031static struct ftrace_func_command ftrace_snapshot_cmd = {
8032 .name = "snapshot",
8033 .func = ftrace_trace_snapshot_callback,
8034};
8035
Tom Zanussi38de93a2013-10-24 08:34:18 -05008036static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008037{
8038 return register_ftrace_command(&ftrace_snapshot_cmd);
8039}
8040#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008041static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008042#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008043
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008044static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008045{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008046 if (WARN_ON(!tr->dir))
8047 return ERR_PTR(-ENODEV);
8048
8049 /* Top directory uses NULL as the parent */
8050 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8051 return NULL;
8052
8053 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008054 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008055}
8056
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008057static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8058{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008059 struct dentry *d_tracer;
8060
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008061 if (tr->percpu_dir)
8062 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008063
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008064 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008065 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008066 return NULL;
8067
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008068 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008069
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008070 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008071 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008072
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008073 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008074}
8075
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008076static struct dentry *
8077trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8078 void *data, long cpu, const struct file_operations *fops)
8079{
8080 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8081
8082 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008083 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008084 return ret;
8085}
8086
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008087static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008088tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008089{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008090 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008091 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008092 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008093
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008094 if (!d_percpu)
8095 return;
8096
Steven Rostedtdd49a382010-10-20 21:51:26 -04008097 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008098 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008099 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008100 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008101 return;
8102 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008103
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008104 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008105 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008106 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008107
8108 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008109 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008110 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008111
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008112 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008113 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008114
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008115 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008116 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008117
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008118 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008119 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008120
8121#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008122 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008123 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008124
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008125 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008126 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008127#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008128}
8129
Steven Rostedt60a11772008-05-12 21:20:44 +02008130#ifdef CONFIG_FTRACE_SELFTEST
8131/* Let selftest have access to static functions in this file */
8132#include "trace_selftest.c"
8133#endif
8134
Steven Rostedt577b7852009-02-26 23:43:05 -05008135static ssize_t
8136trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8137 loff_t *ppos)
8138{
8139 struct trace_option_dentry *topt = filp->private_data;
8140 char *buf;
8141
8142 if (topt->flags->val & topt->opt->bit)
8143 buf = "1\n";
8144 else
8145 buf = "0\n";
8146
8147 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8148}
8149
8150static ssize_t
8151trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8152 loff_t *ppos)
8153{
8154 struct trace_option_dentry *topt = filp->private_data;
8155 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008156 int ret;
8157
Peter Huewe22fe9b52011-06-07 21:58:27 +02008158 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8159 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008160 return ret;
8161
Li Zefan8d18eaa2009-12-08 11:17:06 +08008162 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008163 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008164
8165 if (!!(topt->flags->val & topt->opt->bit) != val) {
8166 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008167 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008168 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008169 mutex_unlock(&trace_types_lock);
8170 if (ret)
8171 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008172 }
8173
8174 *ppos += cnt;
8175
8176 return cnt;
8177}
8178
8179
8180static const struct file_operations trace_options_fops = {
8181 .open = tracing_open_generic,
8182 .read = trace_options_read,
8183 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008184 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008185};
8186
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008187/*
8188 * In order to pass in both the trace_array descriptor as well as the index
8189 * to the flag that the trace option file represents, the trace_array
8190 * has a character array of trace_flags_index[], which holds the index
8191 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8192 * The address of this character array is passed to the flag option file
8193 * read/write callbacks.
8194 *
8195 * In order to extract both the index and the trace_array descriptor,
8196 * get_tr_index() uses the following algorithm.
8197 *
8198 * idx = *ptr;
8199 *
8200 * Dereferencing the pointer yields the index, because the pointer
8201 * points directly at the index value (remember, index[1] == 1).
8202 *
8203 * Then, to get the trace_array descriptor, we subtract that index
8204 * from the pointer, which lands us at the start of the array:
8205 *
8206 * ptr - idx == &index[0]
8207 *
8208 * Then a simple container_of() from that pointer gets us to the
8209 * trace_array descriptor.
8210 */
8211static void get_tr_index(void *data, struct trace_array **ptr,
8212 unsigned int *pindex)
8213{
8214 *pindex = *(unsigned char *)data;
8215
8216 *ptr = container_of(data - *pindex, struct trace_array,
8217 trace_flags_index);
8218}
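
/*
 * Worked example with hypothetical numbers: if data points at
 * tr->trace_flags_index[3], then *data == 3, so *pindex becomes 3 and
 * data - 3 == &tr->trace_flags_index[0]; container_of() then steps back
 * from that array member to the enclosing trace_array.
 */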
8219
Steven Rostedta8259072009-02-26 22:19:12 -05008220static ssize_t
8221trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8222 loff_t *ppos)
8223{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008224 void *tr_index = filp->private_data;
8225 struct trace_array *tr;
8226 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008227 char *buf;
8228
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008229 get_tr_index(tr_index, &tr, &index);
8230
8231 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008232 buf = "1\n";
8233 else
8234 buf = "0\n";
8235
8236 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8237}
8238
8239static ssize_t
8240trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8241 loff_t *ppos)
8242{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008243 void *tr_index = filp->private_data;
8244 struct trace_array *tr;
8245 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008246 unsigned long val;
8247 int ret;
8248
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008249 get_tr_index(tr_index, &tr, &index);
8250
Peter Huewe22fe9b52011-06-07 21:58:27 +02008251 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8252 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008253 return ret;
8254
Zhaoleif2d84b62009-08-07 18:55:48 +08008255 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008256 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008257
Prateek Sood3a53acf2019-12-10 09:15:16 +00008258 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008259 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008260 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008261 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008262 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008263
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008264 if (ret < 0)
8265 return ret;
8266
Steven Rostedta8259072009-02-26 22:19:12 -05008267 *ppos += cnt;
8268
8269 return cnt;
8270}
8271
Steven Rostedta8259072009-02-26 22:19:12 -05008272static const struct file_operations trace_options_core_fops = {
8273 .open = tracing_open_generic,
8274 .read = trace_options_core_read,
8275 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008276 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008277};
8278
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008279struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008280 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008281 struct dentry *parent,
8282 void *data,
8283 const struct file_operations *fops)
8284{
8285 struct dentry *ret;
8286
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008287 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008288 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008289 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008290
8291 return ret;
8292}
8293
8294
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008295static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008296{
8297 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008298
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008299 if (tr->options)
8300 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008301
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008302 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008303 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008304 return NULL;
8305
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008306 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008307 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008308 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008309 return NULL;
8310 }
8311
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008312 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008313}
8314
Steven Rostedt577b7852009-02-26 23:43:05 -05008315static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008316create_trace_option_file(struct trace_array *tr,
8317 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008318 struct tracer_flags *flags,
8319 struct tracer_opt *opt)
8320{
8321 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008322
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008323 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008324 if (!t_options)
8325 return;
8326
8327 topt->flags = flags;
8328 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008329 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008330
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008331 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008332 &trace_options_fops);
Steven Rostedt577b7852009-02-26 23:43:05 -05008334}
8335
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008336static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008337create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008338{
8339 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008340 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008341 struct tracer_flags *flags;
8342 struct tracer_opt *opts;
8343 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008344 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008345
8346 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008347 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008348
8349 flags = tracer->flags;
8350
8351 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008352 return;
8353
8354 /*
8355 * If this is an instance, only create flags for tracers
8356 * the instance may have.
8357 */
8358 if (!trace_ok_for_array(tracer, tr))
8359 return;
8360
8361 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008362		/* Make sure there are no duplicate flags. */
8363 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008364 return;
8365 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008366
8367 opts = flags->opts;
8368
8369 for (cnt = 0; opts[cnt].name; cnt++)
8370 ;
8371
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008372 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008373 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008374 return;
8375
8376 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8377 GFP_KERNEL);
8378 if (!tr_topts) {
8379 kfree(topts);
8380 return;
8381 }
8382
8383 tr->topts = tr_topts;
8384 tr->topts[tr->nr_topts].tracer = tracer;
8385 tr->topts[tr->nr_topts].topts = topts;
8386 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008387
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008388 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008389 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008390 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008391 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008392 "Failed to create trace option: %s",
8393 opts[cnt].name);
8394 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008395}
8396
Steven Rostedta8259072009-02-26 22:19:12 -05008397static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008398create_trace_option_core_file(struct trace_array *tr,
8399 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008400{
8401 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008402
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008403 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008404 if (!t_options)
8405 return NULL;
8406
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008407 return trace_create_file(option, 0644, t_options,
8408 (void *)&tr->trace_flags_index[index],
8409 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008410}
8411
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008412static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008413{
8414 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008415 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008416 int i;
8417
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008418 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008419 if (!t_options)
8420 return;
8421
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008422 for (i = 0; trace_options[i]; i++) {
8423 if (top_level ||
8424 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8425 create_trace_option_core_file(tr, trace_options[i], i);
8426 }
Steven Rostedta8259072009-02-26 22:19:12 -05008427}
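
/*
 * Illustrative sketch (not part of the original source): the core option
 * files created above appear as boolean knobs under the "options"
 * directory. Assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 *	echo 0 > /sys/kernel/tracing/options/overwrite
 *
 * Tracer-specific flags, added by create_trace_option_files() above, land
 * in the same directory once their tracer is registered, e.g.
 * options/funcgraph-proc for the function_graph tracer.
 */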
8428
Steven Rostedt499e5472012-02-22 15:50:28 -05008429static ssize_t
8430rb_simple_read(struct file *filp, char __user *ubuf,
8431 size_t cnt, loff_t *ppos)
8432{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008433 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008434 char buf[64];
8435 int r;
8436
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008437 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008438 r = sprintf(buf, "%d\n", r);
8439
8440 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8441}
8442
8443static ssize_t
8444rb_simple_write(struct file *filp, const char __user *ubuf,
8445 size_t cnt, loff_t *ppos)
8446{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008447 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008448 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008449 unsigned long val;
8450 int ret;
8451
8452 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8453 if (ret)
8454 return ret;
8455
8456 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008457 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008458 if (!!val == tracer_tracing_is_on(tr)) {
8459 val = 0; /* do nothing */
8460 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008461 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008462 if (tr->current_trace->start)
8463 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008464 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008465 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008466 if (tr->current_trace->stop)
8467 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008468 }
8469 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008470 }
8471
8472 (*ppos)++;
8473
8474 return cnt;
8475}
8476
8477static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008478 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008479 .read = rb_simple_read,
8480 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008481 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008482 .llseek = default_llseek,
8483};
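
/*
 * Illustrative usage (hedged): rb_simple_fops backs the per-instance
 * "tracing_on" file wired up in init_tracer_tracefs() below. Assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on		# stop recording
 *	cat /sys/kernel/tracing/tracing_on		# reads back 0
 *	echo 1 > /sys/kernel/tracing/tracing_on		# resume recording
 */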
8484
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008485static ssize_t
8486buffer_percent_read(struct file *filp, char __user *ubuf,
8487 size_t cnt, loff_t *ppos)
8488{
8489 struct trace_array *tr = filp->private_data;
8490 char buf[64];
8491 int r;
8492
8493 r = tr->buffer_percent;
8494 r = sprintf(buf, "%d\n", r);
8495
8496 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8497}
8498
8499static ssize_t
8500buffer_percent_write(struct file *filp, const char __user *ubuf,
8501 size_t cnt, loff_t *ppos)
8502{
8503 struct trace_array *tr = filp->private_data;
8504 unsigned long val;
8505 int ret;
8506
8507 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8508 if (ret)
8509 return ret;
8510
8511 if (val > 100)
8512 return -EINVAL;
8513
8514 if (!val)
8515 val = 1;
8516
8517 tr->buffer_percent = val;
8518
8519 (*ppos)++;
8520
8521 return cnt;
8522}
8523
8524static const struct file_operations buffer_percent_fops = {
8525 .open = tracing_open_generic_tr,
8526 .read = buffer_percent_read,
8527 .write = buffer_percent_write,
8528 .release = tracing_release_generic_tr,
8529 .llseek = default_llseek,
8530};
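
/*
 * Illustrative usage (hedged): buffer_percent controls how full the ring
 * buffer must be before waiting readers are woken up. Writes are
 * validated by buffer_percent_write() above: values over 100 fail with
 * -EINVAL and 0 is bumped to 1.
 *
 *	echo 50 > /sys/kernel/tracing/buffer_percent	# the default
 *	echo 1  > /sys/kernel/tracing/buffer_percent	# wake readers early
 */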
8531
YueHaibingff585c52019-06-14 23:32:10 +08008532static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008533
8534static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008535init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008536
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008537static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008538allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008539{
8540 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008541
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008542 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008543
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008544 buf->tr = tr;
8545
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008546 buf->buffer = ring_buffer_alloc(size, rb_flags);
8547 if (!buf->buffer)
8548 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008549
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008550 buf->data = alloc_percpu(struct trace_array_cpu);
8551 if (!buf->data) {
8552 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008553 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008554 return -ENOMEM;
8555 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008556
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008557 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008558 set_buffer_entries(&tr->array_buffer,
8559 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008560
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008561 return 0;
8562}
8563
8564static int allocate_trace_buffers(struct trace_array *tr, int size)
8565{
8566 int ret;
8567
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008568 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008569 if (ret)
8570 return ret;
8571
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008572#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008573 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8574 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008575 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008576 ring_buffer_free(tr->array_buffer.buffer);
8577 tr->array_buffer.buffer = NULL;
8578 free_percpu(tr->array_buffer.data);
8579 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008580 return -ENOMEM;
8581 }
8582 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008583
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008584 /*
8585 * Only the top level trace array gets its snapshot allocated
8586 * from the kernel command line.
8587 */
8588 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008589#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04008590
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008591 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008592}
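
/*
 * Illustrative note (hedged): the max_buffer above is only sized for real
 * use when booting with the "alloc_snapshot" kernel command line option;
 * otherwise it gets a minimal one-page buffer and is resized the first
 * time a snapshot is actually taken.
 */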
8593
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008594static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008595{
8596 if (buf->buffer) {
8597 ring_buffer_free(buf->buffer);
8598 buf->buffer = NULL;
8599 free_percpu(buf->data);
8600 buf->data = NULL;
8601 }
8602}
8603
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008604static void free_trace_buffers(struct trace_array *tr)
8605{
8606 if (!tr)
8607 return;
8608
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008609 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008610
8611#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008612 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008613#endif
8614}
8615
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008616static void init_trace_flags_index(struct trace_array *tr)
8617{
8618 int i;
8619
8620 /* Used by the trace options files */
8621 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8622 tr->trace_flags_index[i] = i;
8623}
8624
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008625static void __update_tracer_options(struct trace_array *tr)
8626{
8627 struct tracer *t;
8628
8629 for (t = trace_types; t; t = t->next)
8630 add_tracer_options(tr, t);
8631}
8632
8633static void update_tracer_options(struct trace_array *tr)
8634{
8635 mutex_lock(&trace_types_lock);
8636 __update_tracer_options(tr);
8637 mutex_unlock(&trace_types_lock);
8638}
8639
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008640/* Must have trace_types_lock held */
8641struct trace_array *trace_array_find(const char *instance)
8642{
8643 struct trace_array *tr, *found = NULL;
8644
8645 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8646 if (tr->name && strcmp(tr->name, instance) == 0) {
8647 found = tr;
8648 break;
8649 }
8650 }
8651
8652 return found;
8653}
8654
8655struct trace_array *trace_array_find_get(const char *instance)
8656{
8657 struct trace_array *tr;
8658
8659 mutex_lock(&trace_types_lock);
8660 tr = trace_array_find(instance);
8661 if (tr)
8662 tr->ref++;
8663 mutex_unlock(&trace_types_lock);
8664
8665 return tr;
8666}
8667
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008668static int trace_array_create_dir(struct trace_array *tr)
8669{
8670 int ret;
8671
8672 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8673 if (!tr->dir)
8674 return -EINVAL;
8675
8676 ret = event_trace_add_tracer(tr->dir, tr);
8677 if (ret)
8678 tracefs_remove(tr->dir);
8679
8680 init_tracer_tracefs(tr, tr->dir);
8681 __update_tracer_options(tr);
8682
8683 return ret;
8684}
8685
Divya Indi28879782019-11-20 11:08:38 -08008686static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008687{
Steven Rostedt277ba042012-08-03 16:10:49 -04008688 struct trace_array *tr;
8689 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008690
Steven Rostedt277ba042012-08-03 16:10:49 -04008691 ret = -ENOMEM;
8692 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8693 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008694 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008695
8696 tr->name = kstrdup(name, GFP_KERNEL);
8697 if (!tr->name)
8698 goto out_free_tr;
8699
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008700 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8701 goto out_free_tr;
8702
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008703 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008704
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008705 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8706
Steven Rostedt277ba042012-08-03 16:10:49 -04008707 raw_spin_lock_init(&tr->start_lock);
8708
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008709 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8710
Steven Rostedt277ba042012-08-03 16:10:49 -04008711 tr->current_trace = &nop_trace;
8712
8713 INIT_LIST_HEAD(&tr->systems);
8714 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008715 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008716 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008717
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008718 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008719 goto out_free_tr;
8720
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008721 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008722 goto out_free_tr;
8723
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008724 ftrace_init_trace_array(tr);
8725
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008726 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008727
8728 if (trace_instance_dir) {
8729 ret = trace_array_create_dir(tr);
8730 if (ret)
8731 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09008732	} else {
8733		__trace_early_add_events(tr);
	}
Steven Rostedt277ba042012-08-03 16:10:49 -04008734
8735 list_add(&tr->list, &ftrace_trace_arrays);
8736
Divya Indi28879782019-11-20 11:08:38 -08008737 tr->ref++;
8738
Divya Indif45d1222019-03-20 11:28:51 -07008739 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008740
8741 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008742 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008743 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008744 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008745 kfree(tr->name);
8746 kfree(tr);
8747
Divya Indif45d1222019-03-20 11:28:51 -07008748 return ERR_PTR(ret);
8749}
Steven Rostedt277ba042012-08-03 16:10:49 -04008750
Divya Indif45d1222019-03-20 11:28:51 -07008751static int instance_mkdir(const char *name)
8752{
Divya Indi28879782019-11-20 11:08:38 -08008753 struct trace_array *tr;
8754 int ret;
8755
8756 mutex_lock(&event_mutex);
8757 mutex_lock(&trace_types_lock);
8758
8759 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008760 if (trace_array_find(name))
8761 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08008762
8763 tr = trace_array_create(name);
8764
8765 ret = PTR_ERR_OR_ZERO(tr);
8766
8767out_unlock:
8768 mutex_unlock(&trace_types_lock);
8769 mutex_unlock(&event_mutex);
8770 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008771}
8772
Divya Indi28879782019-11-20 11:08:38 -08008773/**
8774 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8775 * @name: The name of the trace array to be looked up/created.
8776 *
8777 * Returns a pointer to the trace array with the given name, or
8778 * NULL if it cannot be created.
8779 *
8780 * NOTE: This function increments the reference counter associated with the
8781 * trace array returned. This makes sure it cannot be freed while in use.
8782 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05008783 * If the trace_array is to be freed, trace_array_destroy() needs to
8784 * be called after the trace_array_put(), or simply let user space delete
8785 * it from the tracefs instances directory. But until the
8786 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08008787 *
8788 */
8789struct trace_array *trace_array_get_by_name(const char *name)
8790{
8791 struct trace_array *tr;
8792
8793 mutex_lock(&event_mutex);
8794 mutex_lock(&trace_types_lock);
8795
8796 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8797 if (tr->name && strcmp(tr->name, name) == 0)
8798 goto out_unlock;
8799 }
8800
8801 tr = trace_array_create(name);
8802
8803 if (IS_ERR(tr))
8804 tr = NULL;
8805out_unlock:
8806 if (tr)
8807 tr->ref++;
8808
8809 mutex_unlock(&trace_types_lock);
8810 mutex_unlock(&event_mutex);
8811 return tr;
8812}
8813EXPORT_SYMBOL_GPL(trace_array_get_by_name);
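
/*
 * Illustrative in-kernel usage sketch (hedged; "my_instance" and the
 * message are made up, and error handling is abbreviated):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	trace_array_printk(tr, _THIS_IP_, "hello\n");
 *	trace_array_put(tr);
 *
 * Call trace_array_destroy() once no references remain, or let user
 * space rmdir the instance. On kernels that require it, call
 * trace_array_init_printk(tr) before using trace_array_printk().
 */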
8814
Divya Indif45d1222019-03-20 11:28:51 -07008815static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008816{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008817 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008818
Divya Indi28879782019-11-20 11:08:38 -08008819 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008820 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07008821 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008822
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008823 list_del(&tr->list);
8824
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008825 /* Disable all the flags that were enabled coming in */
8826 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8827 if ((1 << i) & ZEROED_TRACE_FLAGS)
8828 set_tracer_flag(tr, 1 << i, 0);
8829 }
8830
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008831 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308832 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008833 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008834 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008835 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05008836 tracefs_remove(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008837 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008838
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008839 for (i = 0; i < tr->nr_topts; i++) {
8840 kfree(tr->topts[i].topts);
8841 }
8842 kfree(tr->topts);
8843
Chunyu Hudb9108e02017-07-20 18:36:09 +08008844 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008845 kfree(tr->name);
8846 kfree(tr);
8847
Divya Indif45d1222019-03-20 11:28:51 -07008848 return 0;
8849}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008850
Divya Indie585e642019-08-14 10:55:24 -07008851int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008852{
Divya Indie585e642019-08-14 10:55:24 -07008853 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008854 int ret;
8855
Divya Indie585e642019-08-14 10:55:24 -07008856 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008857 return -EINVAL;
8858
8859 mutex_lock(&event_mutex);
8860 mutex_lock(&trace_types_lock);
8861
Divya Indie585e642019-08-14 10:55:24 -07008862 ret = -ENODEV;
8863
8864 /* Making sure trace array exists before destroying it. */
8865 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8866 if (tr == this_tr) {
8867 ret = __remove_instance(tr);
8868 break;
8869 }
8870 }
Divya Indif45d1222019-03-20 11:28:51 -07008871
8872 mutex_unlock(&trace_types_lock);
8873 mutex_unlock(&event_mutex);
8874
8875 return ret;
8876}
8877EXPORT_SYMBOL_GPL(trace_array_destroy);
8878
8879static int instance_rmdir(const char *name)
8880{
8881 struct trace_array *tr;
8882 int ret;
8883
8884 mutex_lock(&event_mutex);
8885 mutex_lock(&trace_types_lock);
8886
8887 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008888 tr = trace_array_find(name);
8889 if (tr)
8890 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008891
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008892 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008893 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008894
8895 return ret;
8896}
8897
Steven Rostedt277ba042012-08-03 16:10:49 -04008898static __init void create_trace_instances(struct dentry *d_tracer)
8899{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008900 struct trace_array *tr;
8901
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008902 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8903 instance_mkdir,
8904 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008905 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04008906 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008907
8908 mutex_lock(&event_mutex);
8909 mutex_lock(&trace_types_lock);
8910
8911 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8912 if (!tr->name)
8913 continue;
8914 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8915 "Failed to create instance directory\n"))
8916 break;
8917 }
8918
8919 mutex_unlock(&trace_types_lock);
8920 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008921}
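
/*
 * Illustrative user-space view (hedged) of the directory created above:
 * instances are managed with plain mkdir/rmdir, which end up in
 * instance_mkdir() and instance_rmdir() respectively:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 */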
8922
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008923static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008924init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008925{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008926 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008927 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008928
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008929 trace_create_file("available_tracers", 0444, d_tracer,
8930 tr, &show_traces_fops);
8931
8932 trace_create_file("current_tracer", 0644, d_tracer,
8933 tr, &set_tracer_fops);
8934
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008935 trace_create_file("tracing_cpumask", 0644, d_tracer,
8936 tr, &tracing_cpumask_fops);
8937
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008938 trace_create_file("trace_options", 0644, d_tracer,
8939 tr, &tracing_iter_fops);
8940
8941 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008942 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008943
8944 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008945 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008946
8947 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008948 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008949
8950 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8951 tr, &tracing_total_entries_fops);
8952
Wang YanQing238ae932013-05-26 16:52:01 +08008953 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008954 tr, &tracing_free_buffer_fops);
8955
8956 trace_create_file("trace_marker", 0220, d_tracer,
8957 tr, &tracing_mark_fops);
8958
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008959 file = __find_event_file(tr, "ftrace", "print");
8960 if (file && file->dir)
8961 trace_create_file("trigger", 0644, file->dir, file,
8962 &event_trigger_fops);
8963 tr->trace_marker_file = file;
8964
Steven Rostedtfa32e852016-07-06 15:25:08 -04008965 trace_create_file("trace_marker_raw", 0220, d_tracer,
8966 tr, &tracing_mark_raw_fops);
8967
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008968 trace_create_file("trace_clock", 0644, d_tracer, tr,
8969 &trace_clock_fops);
8970
8971 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008972 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008973
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008974 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8975 &trace_time_stamp_mode_fops);
8976
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008977 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008978
8979 trace_create_file("buffer_percent", 0444, d_tracer,
8980 tr, &buffer_percent_fops);
8981
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008982 create_trace_options_dir(tr);
8983
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008984#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008985 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008986#endif
8987
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008988 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008989 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008990
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008991#ifdef CONFIG_TRACER_SNAPSHOT
8992 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008993 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008994#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008995
Tom Zanussi8a062902019-03-31 18:48:15 -05008996 trace_create_file("error_log", 0644, d_tracer,
8997 tr, &tracing_err_log_fops);
8998
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008999 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009000 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009001
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04009002 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009003}
9004
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009005static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009006{
9007 struct vfsmount *mnt;
9008 struct file_system_type *type;
9009
9010 /*
9011 * To maintain backward compatibility for tools that mount
9012 * debugfs to get to the tracing facility, tracefs is automatically
9013 * mounted to the debugfs/tracing directory.
9014 */
9015 type = get_fs_type("tracefs");
9016 if (!type)
9017 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009018 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009019 put_filesystem(type);
9020 if (IS_ERR(mnt))
9021 return NULL;
9022 mntget(mnt);
9023
9024 return mnt;
9025}
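
/*
 * Illustrative effect of the automount above (hedged): on a kernel with
 * debugfs mounted, both paths below reach the same tracefs files; the
 * second exists only for pre-tracefs tools:
 *
 *	/sys/kernel/tracing/trace
 *	/sys/kernel/debug/tracing/trace
 */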
9026
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009027/**
9028 * tracing_init_dentry - initialize top level trace array
9029 *
9030 * This is called when creating files or directories in the tracing
9031 * directory. It is called via fs_initcall() by any of the boot up code
9032 * and returns 0 on success, or a negative errno on failure.
9033 */
Wei Yang22c36b12020-07-12 09:10:36 +08009034int tracing_init_dentry(void)
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009035{
9036 struct trace_array *tr = &global_trace;
9037
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009038 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009039 pr_warn("Tracing disabled due to lockdown\n");
Wei Yang22c36b12020-07-12 09:10:36 +08009040 return -EPERM;
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009041 }
9042
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009043 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009044 if (tr->dir)
Wei Yang22c36b12020-07-12 09:10:36 +08009045 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009046
Peter Enderborg072e1332020-07-16 09:15:10 +02009047 if (WARN_ON(!tracefs_initialized()))
Wei Yang22c36b12020-07-12 09:10:36 +08009048 return -ENODEV;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009049
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009050 /*
9051 * As there may still be users that expect the tracing
9052 * files to exist in debugfs/tracing, we must automount
9053 * the tracefs file system there, so older tools still
9054	 * work with the newer kernel.
9055 */
9056 tr->dir = debugfs_create_automount("tracing", NULL,
9057 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009058
Wei Yang22c36b12020-07-12 09:10:36 +08009059 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009060}
9061
Jeremy Linton00f4b652017-05-31 16:56:43 -05009062extern struct trace_eval_map *__start_ftrace_eval_maps[];
9063extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009064
Jeremy Linton5f60b352017-05-31 16:56:47 -05009065static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009066{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009067 int len;
9068
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009069 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009070 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009071}
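
/*
 * Illustrative background (hedged; MY_FOO_STATE is a made-up name): the
 * eval maps inserted above come from TRACE_DEFINE_ENUM() uses in event
 * headers, e.g.
 *
 *	TRACE_DEFINE_ENUM(MY_FOO_STATE);
 *
 * which lets the symbol be replaced by its numeric value in the event's
 * tracefs "format" file so user space can parse it.
 */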
9072
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009073#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009074static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009075{
Jeremy Linton99be6472017-05-31 16:56:44 -05009076 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009077 return;
9078
9079 /*
9080 * Modules with bad taint do not have events created, do
9081 * not bother with enums either.
9082 */
9083 if (trace_module_has_bad_taint(mod))
9084 return;
9085
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009086 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009087}
9088
Jeremy Linton681bec02017-05-31 16:56:53 -05009089#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009090static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009091{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009092 union trace_eval_map_item *map;
9093 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009094
Jeremy Linton99be6472017-05-31 16:56:44 -05009095 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009096 return;
9097
Jeremy Linton1793ed92017-05-31 16:56:46 -05009098 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009099
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009100 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009101
9102 while (map) {
9103 if (map->head.mod == mod)
9104 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009105 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009106 last = &map->tail.next;
9107 map = map->tail.next;
9108 }
9109 if (!map)
9110 goto out;
9111
Jeremy Linton5f60b352017-05-31 16:56:47 -05009112 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009113 kfree(map);
9114 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009115 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009116}
9117#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009118static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009119#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009120
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009121static int trace_module_notify(struct notifier_block *self,
9122 unsigned long val, void *data)
9123{
9124 struct module *mod = data;
9125
9126 switch (val) {
9127 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009128 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009129 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009130 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009131 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009132 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009133 }
9134
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02009135 return NOTIFY_OK;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009136}
9137
9138static struct notifier_block trace_module_nb = {
9139 .notifier_call = trace_module_notify,
9140 .priority = 0,
9141};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009142#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009143
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009144static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009145{
Wei Yang22c36b12020-07-12 09:10:36 +08009146 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009147
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009148 trace_access_lock_init();
9149
Wei Yang22c36b12020-07-12 09:10:36 +08009150 ret = tracing_init_dentry();
9151 if (ret)
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009152 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009153
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009154 event_trace_init();
9155
Wei Yang22c36b12020-07-12 09:10:36 +08009156 init_tracer_tracefs(&global_trace, NULL);
9157 ftrace_init_tracefs_toplevel(&global_trace, NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009158
Wei Yang22c36b12020-07-12 09:10:36 +08009159 trace_create_file("tracing_thresh", 0644, NULL,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009160 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009161
Wei Yang22c36b12020-07-12 09:10:36 +08009162 trace_create_file("README", 0444, NULL,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009163 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009164
Wei Yang22c36b12020-07-12 09:10:36 +08009165 trace_create_file("saved_cmdlines", 0444, NULL,
Avadh Patel69abe6a2009-04-10 16:04:48 -04009166 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009167
Wei Yang22c36b12020-07-12 09:10:36 +08009168 trace_create_file("saved_cmdlines_size", 0644, NULL,
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009169 NULL, &tracing_saved_cmdlines_size_fops);
9170
Wei Yang22c36b12020-07-12 09:10:36 +08009171 trace_create_file("saved_tgids", 0444, NULL,
Michael Sartain99c621d2017-07-05 22:07:15 -06009172 NULL, &tracing_saved_tgids_fops);
9173
Jeremy Linton5f60b352017-05-31 16:56:47 -05009174 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009175
Wei Yang22c36b12020-07-12 09:10:36 +08009176 trace_create_eval_file(NULL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009177
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009178#ifdef CONFIG_MODULES
9179 register_module_notifier(&trace_module_nb);
9180#endif
9181
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009182#ifdef CONFIG_DYNAMIC_FTRACE
Wei Yang22c36b12020-07-12 09:10:36 +08009183 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009184 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009185#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009186
Wei Yang22c36b12020-07-12 09:10:36 +08009187 create_trace_instances(NULL);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009188
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009189 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009190
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009191 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009192}
9193
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009194static int trace_panic_handler(struct notifier_block *this,
9195 unsigned long event, void *unused)
9196{
Steven Rostedt944ac422008-10-23 19:26:08 -04009197 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009198 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009199 return NOTIFY_OK;
9200}
9201
9202static struct notifier_block trace_panic_notifier = {
9203 .notifier_call = trace_panic_handler,
9204 .next = NULL,
9205 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9206};
9207
9208static int trace_die_handler(struct notifier_block *self,
9209 unsigned long val,
9210 void *data)
9211{
9212 switch (val) {
9213 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009214 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009215 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009216 break;
9217 default:
9218 break;
9219 }
9220 return NOTIFY_OK;
9221}
9222
9223static struct notifier_block trace_die_notifier = {
9224 .notifier_call = trace_die_handler,
9225 .priority = 200
9226};
9227
9228/*
9229 * printk is limited to a max of 1024, and we really don't need it that big.
9230 * Nothing should be printing 1000 characters anyway.
9231 */
9232#define TRACE_MAX_PRINT 1000
9233
9234/*
9235 * Define KERN_TRACE here so that we have one place to modify
9236 * it if we decide to change what log level the ftrace dump
9237 * should be at.
9238 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009239#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009240
Jason Wessel955b61e2010-08-05 09:22:23 -05009241void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009242trace_printk_seq(struct trace_seq *s)
9243{
9244 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009245 if (s->seq.len >= TRACE_MAX_PRINT)
9246 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009247
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009248 /*
9249 * More paranoid code. Although the buffer size is set to
9250 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9251 * an extra layer of protection.
9252 */
9253 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9254 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009255
9256	/* Should be NUL terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009257 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009258
9259 printk(KERN_TRACE "%s", s->buffer);
9260
Steven Rostedtf9520752009-03-02 14:04:40 -05009261 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009262}
9263
Jason Wessel955b61e2010-08-05 09:22:23 -05009264void trace_init_global_iter(struct trace_iterator *iter)
9265{
9266 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009267 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009268 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009269 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009270
9271 if (iter->trace && iter->trace->open)
9272 iter->trace->open(iter);
9273
9274 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009275 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009276 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9277
9278 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9279 if (trace_clocks[iter->tr->clock_id].in_ns)
9280 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009281}
9282
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009283void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009284{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009285 /* use static because iter can be a bit big for the stack */
9286 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009287 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009288 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009289 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009290 unsigned long flags;
9291 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009292
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009293 /* Only allow one dump user at a time. */
9294 if (atomic_inc_return(&dump_running) != 1) {
9295 atomic_dec(&dump_running);
9296 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009297 }
9298
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009299 /*
9300 * Always turn off tracing when we dump.
9301 * We don't need to show trace output of what happens
9302 * between multiple crashes.
9303 *
9304 * If the user does a sysrq-z, then they can re-enable
9305 * tracing with echo 1 > tracing_on.
9306 */
9307 tracing_off();
9308
9309 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009310 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009311
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009312 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009313 trace_init_global_iter(&iter);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009314	/* Cannot use kmalloc for iter.temp */
9315 iter.temp = static_temp_buf;
9316 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009317
Steven Rostedtd7690412008-10-01 00:29:53 -04009318 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009319 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009320 }
9321
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009322 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009323
Török Edwinb54d3de2008-11-22 13:28:48 +02009324 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009325 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009326
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009327 switch (oops_dump_mode) {
9328 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009329 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009330 break;
9331 case DUMP_ORIG:
9332 iter.cpu_file = raw_smp_processor_id();
9333 break;
9334 case DUMP_NONE:
9335 goto out_enable;
9336 default:
9337 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009338 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009339 }
9340
9341 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009342
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009343 /* Did function tracer already get disabled? */
9344 if (ftrace_is_dead()) {
9345 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9346 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9347 }
9348
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009349 /*
Randy Dunlap5c8c2062020-08-06 20:32:59 -07009350	 * We need to stop all tracing on all CPUs to read
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009351	 * the next buffer. This is a bit expensive, but is
9352	 * not done often. We read everything we can,
9353	 * and then release the locks again.
9354 */
9355
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009356 while (!trace_empty(&iter)) {
9357
9358 if (!cnt)
9359 printk(KERN_TRACE "---------------------------------\n");
9360
9361 cnt++;
9362
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009363 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009364 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009365
Jason Wessel955b61e2010-08-05 09:22:23 -05009366 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009367 int ret;
9368
9369 ret = print_trace_line(&iter);
9370 if (ret != TRACE_TYPE_NO_CONSUME)
9371 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009372 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009373 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009374
9375 trace_printk_seq(&iter.seq);
9376 }
9377
9378 if (!cnt)
9379 printk(KERN_TRACE " (ftrace buffer empty)\n");
9380 else
9381 printk(KERN_TRACE "---------------------------------\n");
9382
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009383 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009384 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009385
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009386 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009387 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009388 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009389 atomic_dec(&dump_running);
9390 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009391 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009392}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009393EXPORT_SYMBOL_GPL(ftrace_dump);
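
/*
 * Illustrative ways ftrace_dump() gets invoked (hedged): through the
 * panic/die notifiers registered in tracer_alloc_buffers() when
 * ftrace_dump_on_oops is set, or manually via sysrq:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	# dump on oops
 *	echo z > /proc/sysrq-trigger			# dump right now
 */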
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009394
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009395int trace_run_command(const char *buf, int (*createfn)(int, char **))
9396{
9397 char **argv;
9398 int argc, ret;
9399
9400 argc = 0;
9401 ret = 0;
9402 argv = argv_split(GFP_KERNEL, buf, &argc);
9403 if (!argv)
9404 return -ENOMEM;
9405
9406 if (argc)
9407 ret = createfn(argc, argv);
9408
9409 argv_free(argv);
9410
9411 return ret;
9412}
9413
9414#define WRITE_BUFSIZE 4096
9415
9416ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9417 size_t count, loff_t *ppos,
9418 int (*createfn)(int, char **))
9419{
9420 char *kbuf, *buf, *tmp;
9421 int ret = 0;
9422 size_t done = 0;
9423 size_t size;
9424
9425 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9426 if (!kbuf)
9427 return -ENOMEM;
9428
9429 while (done < count) {
9430 size = count - done;
9431
9432 if (size >= WRITE_BUFSIZE)
9433 size = WRITE_BUFSIZE - 1;
9434
9435 if (copy_from_user(kbuf, buffer + done, size)) {
9436 ret = -EFAULT;
9437 goto out;
9438 }
9439 kbuf[size] = '\0';
9440 buf = kbuf;
9441 do {
9442 tmp = strchr(buf, '\n');
9443 if (tmp) {
9444 *tmp = '\0';
9445 size = tmp - buf + 1;
9446 } else {
9447 size = strlen(buf);
9448 if (done + size < count) {
9449 if (buf != kbuf)
9450 break;
9451 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9452 pr_warn("Line length is too long: Should be less than %d\n",
9453 WRITE_BUFSIZE - 2);
9454 ret = -EINVAL;
9455 goto out;
9456 }
9457 }
9458 done += size;
9459
9460 /* Remove comments */
9461 tmp = strchr(buf, '#');
9462
9463 if (tmp)
9464 *tmp = '\0';
9465
9466 ret = trace_run_command(buf, createfn);
9467 if (ret)
9468 goto out;
9469 buf += size;
9470
9471 } while (done < count);
9472 }
9473 ret = done;
9474
9475out:
9476 kfree(kbuf);
9477
9478 return ret;
9479}
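
/*
 * Illustrative caller (hedged): dynamic event files such as
 * kprobe_events feed their writes through trace_parse_run_command(), so
 * a command like
 *
 *	echo 'p:myprobe do_sys_open' > kprobe_events
 *
 * is split on newlines, has '#' comments stripped, and reaches the
 * file's createfn as argc/argv: {"p:myprobe", "do_sys_open"}.
 */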
9480
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009481__init static int tracer_alloc_buffers(void)
9482{
Steven Rostedt73c51622009-03-11 13:42:01 -04009483 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309484 int ret = -ENOMEM;
9485
9487 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009488 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009489 return -EPERM;
9490 }
9491
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009492 /*
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009493 * Make sure we don't accidentally add more trace options
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009494 * than we have bits for.
9495 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009496 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009497
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309498 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9499 goto out;
9500
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009501 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309502 goto out_free_buffer_mask;
9503
Steven Rostedt07d777f2011-09-22 14:01:55 -04009504 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009505 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009506 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009507 trace_printk_init_buffers();
9508
Steven Rostedt73c51622009-03-11 13:42:01 -04009509 /* To save memory, keep the ring buffer size to its minimum */
9510 if (ring_buffer_expanded)
9511 ring_buf_size = trace_buf_size;
9512 else
9513 ring_buf_size = 1;
9514
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309515 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009516 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009517
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009518 raw_spin_lock_init(&global_trace.start_lock);
9519
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009520 /*
9521 * The prepare callbacks allocates some memory for the ring buffer. We
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009522 * don't free the buffer if the CPU goes down. If we were to free
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009523 * the buffer, then the user would lose any trace that was in the
9524 * buffer. The memory will be removed once the "instance" is removed.
9525 */
9526 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9527				"trace/RB:prepare", trace_rb_cpu_prepare,
9528 NULL);
9529 if (ret < 0)
9530 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009531 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009532 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009533 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9534 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009535 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009536
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009537 if (trace_create_savedcmd() < 0)
9538 goto out_free_temp_buffer;
9539
Steven Rostedtab464282008-05-12 21:21:00 +02009540 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009541 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009542 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009543 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009544 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009545
Steven Rostedt499e5472012-02-22 15:50:28 -05009546 if (global_trace.buffer_disabled)
9547 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009548
Steven Rostedte1e232c2014-02-10 23:38:46 -05009549 if (trace_boot_clock) {
9550 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9551 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009552 pr_warn("Trace clock %s not defined, going back to default\n",
9553 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009554 }
9555
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009556 /*
9557 * register_tracer() might reference current_trace, so it
9558 * needs to be set before we register anything. This is
9559 * just a bootstrap of current_trace anyway.
9560 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009561 global_trace.current_trace = &nop_trace;
9562
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009563 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9564
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009565 ftrace_init_global_array_ops(&global_trace);
9566
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009567 init_trace_flags_index(&global_trace);
9568
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009569 register_tracer(&nop_trace);
9570
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009571 /* Function tracing may start here (via kernel command line) */
9572 init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();
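	/*
	 * apply_trace_boot_options() consumes the "trace_options=" kernel
	 * command line parameter, e.g.:
	 *
	 *	trace_options=stacktrace,sym-addr
	 *
	 * which flips the listed trace flags before any tracer output is
	 * produced.
	 */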

	register_snapshot_cmd();
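	/*
	 * This registers the "snapshot" command for set_ftrace_filter.
	 * A usage sketch (see Documentation/trace/ftrace.rst):
	 *
	 *	echo 'schedule:snapshot:1' > set_ftrace_filter
	 *
	 * takes a single snapshot the next time schedule() is called.
	 */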

	return 0;

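	/* Error unwinding: undo the allocations above in reverse order */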
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
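
/*
 * tracepoint_printk is set by the "tp_printk" kernel command line
 * parameter. For example, booting with:
 *
 *	tp_printk trace_event=sched_switch
 *
 * sends sched_switch events to printk() (i.e. the console) in addition
 * to the ring buffer; limiting with "trace_event=" keeps the output
 * manageable.
 */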

void __init trace_init(void)
{
	trace_event_init();
}
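
/*
 * Both functions above are called from start_kernel() in init/main.c:
 * early_trace_init() runs early so boot-time function tracing can
 * start, and trace_init() completes trace event setup later in boot.
 */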

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is stored in an init section
	 * that is freed after boot. This function runs at late_initcall
	 * time: if the boot tracer was never registered, clear the
	 * pointer now so that a later registration cannot dereference
	 * memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}
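
/*
 * For example, booting with "ftrace=foo" when no tracer named "foo"
 * ever registers leaves default_bootup_tracer dangling; this initcall
 * then logs:
 *
 *	ftrace bootup tracer 'foo' not registered.
 *
 * and clears the stale pointer.
 */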

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return -EPERM;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
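
/*
 * The tracing clock can also be inspected or changed at run time via
 * tracefs (path assumes the usual mount point):
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * where the clock currently in use is shown in [brackets].
 */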
late_initcall_sync(tracing_set_default_clock);
#endif