// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

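/*
 * Illustrative layout (following the description above): an array that
 * saves N maps looks like
 *
 *	[ head: mod, length = N ][ map 0 ] ... [ map N-1 ][ tail: next, end ]
 *
 * where tail.next chains to the next saved array, so all saved maps can
 * be walked as one long table.
 */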
static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
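	/* rounds to the nearest microsecond: 1499ns -> 1us, 1500ns -> 2us */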
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

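/*
 * Minimal usage sketch (illustrative only; my_write and my_export are
 * hypothetical names, not part of this file). An exporter provides a
 * write() callback and the flags it wants to receive:
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		// forward the 'size' bytes at 'entry' to an external sink
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */
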
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

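/*
 * Typical pairing (illustrative; "my_instance" is a hypothetical name):
 * code that obtains an instance keeps the reference while using it and
 * drops it when done:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr) {
 *		...
 *		trace_array_put(tr);
 *	}
 */
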
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids that should not be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
	       (filtered_no_pids &&
		trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

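/*
 * Illustrative wiring (sketch; the my_* names are hypothetical). The
 * helpers above slot directly into the seq_operations of a pid-list
 * file, which is how the ftrace and event pid filter files use them:
 *
 *	static void *my_pid_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_pid_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,
 *		.next	= my_pid_next,
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */
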
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

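/*
 * For example (illustrative): writing "123 456" to a pid filter file
 * such as set_event_pid lands here and builds a fresh bitmap holding
 * those pids plus any carried over from @filtered_pids; on a parse
 * error the old list stays in place, and if no pid ends up set the new
 * list is dropped (*new_pid_list becomes NULL).
 */
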
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffer
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

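/*
 * Illustrative reader pattern (sketch): consumers bracket their reads
 * with these helpers, for a single cpu or for all of them:
 *
 *	trace_access_lock(cpu);		// or RING_BUFFER_ALL_CPUS
 *	... peek at or consume events of that cpu ...
 *	trace_access_unlock(cpu);
 */
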
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

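/*
 * Callers normally reach this through the trace_puts() macro, which
 * supplies _THIS_IP_ and the string length, e.g. (illustrative):
 *
 *	trace_puts("reached the slow path\n");
 */
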
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

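/*
 * Minimal in-kernel usage sketch (illustrative; 'bad_thing' is a
 * placeholder condition):
 *
 *	tracing_alloc_snapshot();	// sleepable setup path
 *	...
 *	if (unlikely(bad_thing))
 *		tracing_snapshot();	// swaps in the snapshot buffer; not for NMI
 */
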
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

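/*
 * Usage sketch (illustrative; my_update and my_data are hypothetical).
 * The update callback decides, each time a conditional snapshot is
 * attempted, whether the swap should really happen:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return ...;	// true means: take the snapshot
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, my_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */
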
1340/**
1341 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1342 * @tr: The tracing instance
1343 *
1344 * Check whether the conditional snapshot for the given instance is
1345 * enabled; if so, free the cond_snapshot associated with it,
1346 * otherwise return -EINVAL.
1347 *
1348 * Returns 0 if successful, error otherwise.
1349 */
1350int tracing_snapshot_cond_disable(struct trace_array *tr)
1351{
1352 int ret = 0;
1353
1354 arch_spin_lock(&tr->max_lock);
1355
1356 if (!tr->cond_snapshot)
1357 ret = -EINVAL;
1358 else {
1359 kfree(tr->cond_snapshot);
1360 tr->cond_snapshot = NULL;
1361 }
1362
1363 arch_spin_unlock(&tr->max_lock);
1364
1365 return ret;
1366}
1367EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
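
/*
 * Illustrative life cycle of the conditional snapshot API above, assuming
 * a hypothetical module that only wants a snapshot once a counter passed
 * in as cond_data crosses a threshold:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return *(unsigned long *)cond_data > 100;
 *	}
 *
 * Once, from sleepable context:
 *	ret = tracing_snapshot_cond_enable(tr, &my_counter, my_update);
 *
 * In the traced path (snapshots only when my_update() returns true):
 *	tracing_snapshot_cond(tr, &my_counter);
 *
 * When done:
 *	tracing_snapshot_cond_disable(tr);
 */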
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001368#else
1369void tracing_snapshot(void)
1370{
1371 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1372}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001373EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001374void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1375{
1376 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1377}
1378EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001379int tracing_alloc_snapshot(void)
1380{
1381 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1382 return -ENODEV;
1383}
1384EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001385void tracing_snapshot_alloc(void)
1386{
1387 /* Give warning */
1388 tracing_snapshot();
1389}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001390EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001391void *tracing_cond_snapshot_data(struct trace_array *tr)
1392{
1393 return NULL;
1394}
1395EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1396int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1397{
1398 return -ENODEV;
1399}
1400EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1401int tracing_snapshot_cond_disable(struct trace_array *tr)
1402{
1403	return -ENODEV;
1404}
1405EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001406#endif /* CONFIG_TRACER_SNAPSHOT */
1407
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001408void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001409{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001410 if (tr->array_buffer.buffer)
1411 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001412 /*
1413 * This flag is looked at when buffers haven't been allocated
1414 * yet, or by some tracers (like irqsoff) that just want to
1415 * know if the ring buffer has been disabled, but it can handle
1416 * races where it gets disabled while we still do a record.
1417 * As the check is in the fast path of the tracers, it is more
1418 * important to be fast than accurate.
1419 */
1420 tr->buffer_disabled = 1;
1421 /* Make the flag seen by readers */
1422 smp_wmb();
1423}
1424
Steven Rostedt499e5472012-02-22 15:50:28 -05001425/**
1426 * tracing_off - turn off tracing buffers
1427 *
1428 * This function stops the tracing buffers from recording data.
1429 * It does not disable any overhead the tracers themselves may
1430 * be causing. This function simply causes all recording to
1431 * the ring buffers to fail.
1432 */
1433void tracing_off(void)
1434{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001435 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001436}
1437EXPORT_SYMBOL_GPL(tracing_off);
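
/*
 * Illustrative use: calling tracing_off() at a point of interest stops
 * the ring buffer right after the event being chased, preserving the
 * lead-up for inspection via the trace file (hypothetical condition):
 *
 *	if (data->state == BAD_STATE)
 *		tracing_off();
 */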
1438
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001439void disable_trace_on_warning(void)
1440{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001441 if (__disable_trace_on_warning) {
1442 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1443 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001444 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001445 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001446}
1447
Steven Rostedt499e5472012-02-22 15:50:28 -05001448/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001449 * tracer_tracing_is_on - show real state of ring buffer enabled
1450 * @tr: the trace array whose ring buffer state is queried
1451 *
1452 * Returns the real state of the ring buffer: whether it is recording or not.
1453 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001454bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001455{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001456 if (tr->array_buffer.buffer)
1457 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001458 return !tr->buffer_disabled;
1459}
1460
Steven Rostedt499e5472012-02-22 15:50:28 -05001461/**
1462 * tracing_is_on - show state of ring buffers enabled
1463 */
1464int tracing_is_on(void)
1465{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001466 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001467}
1468EXPORT_SYMBOL_GPL(tracing_is_on);
1469
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001470static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001471{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001472 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001473
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001474 if (!str)
1475 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001476 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001477 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001478 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001479 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001480 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481 return 1;
1482}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001483__setup("trace_buf_size=", set_buf_size);
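
/*
 * memparse() above accepts the usual K/M/G suffixes, so the per-CPU
 * buffer size can be set from the kernel command line, e.g.:
 *
 *	trace_buf_size=16M
 */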
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484
Tim Bird0e950172010-02-25 15:36:43 -08001485static int __init set_tracing_thresh(char *str)
1486{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001487 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001488 int ret;
1489
1490 if (!str)
1491 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001492 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001493 if (ret < 0)
1494 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001495 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001496 return 1;
1497}
1498__setup("tracing_thresh=", set_tracing_thresh);
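
/*
 * The boot parameter is given in microseconds and stored in nanoseconds
 * (hence the multiply by 1000). For example, to have the latency tracers
 * only record traces longer than 100 usecs:
 *
 *	tracing_thresh=100
 */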
1499
Steven Rostedt57f50be2008-05-12 21:20:44 +02001500unsigned long nsecs_to_usecs(unsigned long nsecs)
1501{
1502 return nsecs / 1000;
1503}
1504
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001505/*
1506 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001507 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001508 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001509 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001510 */
1511#undef C
1512#define C(a, b) b
1513
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001514/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001516 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517 NULL
1518};
1519
Zhaolei5079f322009-08-25 16:12:56 +08001520static struct {
1521 u64 (*func)(void);
1522 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001523 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001524} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001525 { trace_clock_local, "local", 1 },
1526 { trace_clock_global, "global", 1 },
1527 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001528 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001529 { trace_clock, "perf", 1 },
1530 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001531 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001532 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001533 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001534};
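
/*
 * These clocks can be selected at run time through the trace_clock file
 * (illustrative session; the exact set listed depends on the architecture):
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *	# cat /sys/kernel/tracing/trace_clock
 *	local global counter uptime perf [mono] mono_raw boot
 */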
1535
Tom Zanussi860f9f62018-01-15 20:51:48 -06001536bool trace_clock_in_ns(struct trace_array *tr)
1537{
1538 if (trace_clocks[tr->clock_id].in_ns)
1539 return true;
1540
1541 return false;
1542}
1543
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001544/*
1545 * trace_parser_get_init - allocate and initialize the buffer for trace parser
1546 */
1547int trace_parser_get_init(struct trace_parser *parser, int size)
1548{
1549 memset(parser, 0, sizeof(*parser));
1550
1551 parser->buffer = kmalloc(size, GFP_KERNEL);
1552 if (!parser->buffer)
1553 return 1;
1554
1555 parser->size = size;
1556 return 0;
1557}
1558
1559/*
1560 * trace_parser_put - frees the buffer for trace parser
1561 */
1562void trace_parser_put(struct trace_parser *parser)
1563{
1564 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001565 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001566}
1567
1568/*
1569 * trace_get_user - reads the user input string separated by space
1570 * (matched by isspace(ch))
1571 *
1572 * For each string found the 'struct trace_parser' is updated,
1573 * and the function returns.
1574 *
1575 * Returns number of bytes read.
1576 *
1577 * See kernel/trace/trace.h for 'struct trace_parser' details.
1578 */
1579int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1580 size_t cnt, loff_t *ppos)
1581{
1582 char ch;
1583 size_t read = 0;
1584 ssize_t ret;
1585
1586 if (!*ppos)
1587 trace_parser_clear(parser);
1588
1589 ret = get_user(ch, ubuf++);
1590 if (ret)
1591 goto out;
1592
1593 read++;
1594 cnt--;
1595
1596 /*
1597 * If the parser did not finish with the last write, continue
1598 * reading the user input without skipping the leading spaces.
1599 */
1600 if (!parser->cont) {
1601 /* skip white space */
1602 while (cnt && isspace(ch)) {
1603 ret = get_user(ch, ubuf++);
1604 if (ret)
1605 goto out;
1606 read++;
1607 cnt--;
1608 }
1609
Changbin Du76638d92018-01-16 17:02:29 +08001610 parser->idx = 0;
1611
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001612 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001613 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001614 *ppos += read;
1615 ret = read;
1616 goto out;
1617 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001618 }
1619
1620 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001621 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001622 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 parser->buffer[parser->idx++] = ch;
1624 else {
1625 ret = -EINVAL;
1626 goto out;
1627 }
1628 ret = get_user(ch, ubuf++);
1629 if (ret)
1630 goto out;
1631 read++;
1632 cnt--;
1633 }
1634
1635 /* We either got complete input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001636 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001637 parser->buffer[parser->idx] = 0;
1638 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001639 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001640 parser->cont = true;
1641 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001642 /* Make sure the parsed string always terminates with '\0'. */
1643 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001644 } else {
1645 ret = -EINVAL;
1646 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001647 }
1648
1649 *ppos += read;
1650 ret = read;
1651
1652out:
1653 return ret;
1654}
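
/*
 * Worked example (illustrative): if a user writes "foo bar" to a control
 * file, the first call consumes "foo " and clears parser->cont, so the
 * caller can act on the completed word "foo"; a later call picks up at
 * "bar". If the input ends in the middle of a word, parser->cont is set
 * and the next call keeps filling the same buffer.
 */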
1655
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001656/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001657static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001658{
1659 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001660
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001661 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001662 return -EBUSY;
1663
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001664 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001665 if (cnt > len)
1666 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001667 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001668
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001669 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001670 return cnt;
1671}
1672
Tim Bird0e950172010-02-25 15:36:43 -08001673unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001674static const struct file_operations tracing_max_lat_fops;
1675
1676#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1677 defined(CONFIG_FSNOTIFY)
1678
1679static struct workqueue_struct *fsnotify_wq;
1680
1681static void latency_fsnotify_workfn(struct work_struct *work)
1682{
1683 struct trace_array *tr = container_of(work, struct trace_array,
1684 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001685 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001686}
1687
1688static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1689{
1690 struct trace_array *tr = container_of(iwork, struct trace_array,
1691 fsnotify_irqwork);
1692 queue_work(fsnotify_wq, &tr->fsnotify_work);
1693}
1694
1695static void trace_create_maxlat_file(struct trace_array *tr,
1696 struct dentry *d_tracer)
1697{
1698 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1699 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1700 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1701 d_tracer, &tr->max_latency,
1702 &tracing_max_lat_fops);
1703}
1704
1705__init static int latency_fsnotify_init(void)
1706{
1707 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1708 WQ_UNBOUND | WQ_HIGHPRI, 0);
1709 if (!fsnotify_wq) {
1710 pr_err("Unable to allocate tr_max_lat_wq\n");
1711 return -ENOMEM;
1712 }
1713 return 0;
1714}
1715
1716late_initcall_sync(latency_fsnotify_init);
1717
1718void latency_fsnotify(struct trace_array *tr)
1719{
1720 if (!fsnotify_wq)
1721 return;
1722 /*
1723 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1724 * possible that we are called from __schedule() or do_idle(), which
1725 * could cause a deadlock.
1726 */
1727 irq_work_queue(&tr->fsnotify_irqwork);
1728}
1729
1730/*
1731 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1732 * defined(CONFIG_FSNOTIFY)
1733 */
1734#else
1735
1736#define trace_create_maxlat_file(tr, d_tracer) \
1737 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1738 &tr->max_latency, &tracing_max_lat_fops)
1739
1740#endif
Tim Bird0e950172010-02-25 15:36:43 -08001741
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001742#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001743/*
1744 * Copy the new maximum trace into the separate maximum-trace
1745 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001746 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001747 */
1748static void
1749__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1750{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001751 struct array_buffer *trace_buf = &tr->array_buffer;
1752 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001753 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1754 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001755
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001756 max_buf->cpu = cpu;
1757 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001758
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001759 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001760 max_data->critical_start = data->critical_start;
1761 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001762
Tom Zanussi85f726a2019-03-05 10:12:00 -06001763 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001764 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001765 /*
1766 * If tsk == current, then use current_uid(), as that does not use
1767 * RCU. The irq tracer can be called out of RCU scope.
1768 */
1769 if (tsk == current)
1770 max_data->uid = current_uid();
1771 else
1772 max_data->uid = task_uid(tsk);
1773
Steven Rostedt8248ac02009-09-02 12:27:41 -04001774 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1775 max_data->policy = tsk->policy;
1776 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001777
1778 /* record this tasks comm */
1779 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001780 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001781}
1782
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001783/**
1784 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1785 * @tr: tracer
1786 * @tsk: the task with the latency
1787 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001788 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001789 *
1790 * Flip the buffers between the @tr and the max_tr and record information
1791 * about which task was the cause of this latency.
1792 */
Ingo Molnare309b412008-05-12 21:20:51 +02001793void
Tom Zanussia35873a2019-02-13 17:42:45 -06001794update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1795 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001796{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001797 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001798 return;
1799
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001800 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001801
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001802 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001803 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001804 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001805 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001806 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001807
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001808 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001809
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001810 /* Inherit the recordable setting from array_buffer */
1811 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001812 ring_buffer_record_on(tr->max_buffer.buffer);
1813 else
1814 ring_buffer_record_off(tr->max_buffer.buffer);
1815
Tom Zanussia35873a2019-02-13 17:42:45 -06001816#ifdef CONFIG_TRACER_SNAPSHOT
1817 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1818 goto out_unlock;
1819#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001820 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001821
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001822 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001823
1824 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001825 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001826}
1827
1828/**
1829 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001830 * @tr: tracer
1831 * @tsk: task with the latency
1832 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001833 *
1834 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001835 */
Ingo Molnare309b412008-05-12 21:20:51 +02001836void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001837update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1838{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001839 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001840
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001841 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001842 return;
1843
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001844 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001845 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001846 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001847 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001848 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001849 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001850
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001851 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001852
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001853 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001854
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001855 if (ret == -EBUSY) {
1856 /*
1857 * We failed to swap the buffer due to a commit taking
1858 * place on this CPU. We fail to record, but we reset
1859 * the max trace buffer (no one writes directly to it)
1860 * and flag that it failed.
1861 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001862 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001863 "Failed to swap buffers due to commit in progress\n");
1864 }
1865
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001866 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001867
1868 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001869 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001870}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001871#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001872
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001873static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001874{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001875 /* Iterators are static, they should be filled or empty */
1876 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001877 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001878
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001879 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001880 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001881}
1882
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001883#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001884static bool selftests_can_run;
1885
1886struct trace_selftests {
1887 struct list_head list;
1888 struct tracer *type;
1889};
1890
1891static LIST_HEAD(postponed_selftests);
1892
1893static int save_selftest(struct tracer *type)
1894{
1895 struct trace_selftests *selftest;
1896
1897 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1898 if (!selftest)
1899 return -ENOMEM;
1900
1901 selftest->type = type;
1902 list_add(&selftest->list, &postponed_selftests);
1903 return 0;
1904}
1905
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001906static int run_tracer_selftest(struct tracer *type)
1907{
1908 struct trace_array *tr = &global_trace;
1909 struct tracer *saved_tracer = tr->current_trace;
1910 int ret;
1911
1912 if (!type->selftest || tracing_selftest_disabled)
1913 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001914
1915 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001916 * If a tracer registers early in boot up (before scheduling is
1917 * initialized and such), then do not run its selftest yet.
1918 * Instead, run it a little later in the boot process.
1919 */
1920 if (!selftests_can_run)
1921 return save_selftest(type);
1922
1923 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001924 * Run a selftest on this tracer.
1925 * Here we reset the trace buffer, and set the current
1926 * tracer to be this tracer. The tracer can then run some
1927 * internal tracing to verify that everything is in order.
1928 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001929 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001930 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001931
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001932 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001933
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001934#ifdef CONFIG_TRACER_MAX_TRACE
1935 if (type->use_max_tr) {
1936 /* If we expanded the buffers, make sure the max is expanded too */
1937 if (ring_buffer_expanded)
1938 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1939 RING_BUFFER_ALL_CPUS);
1940 tr->allocated_snapshot = true;
1941 }
1942#endif
1943
1944 /* the test is responsible for initializing and enabling */
1945 pr_info("Testing tracer %s: ", type->name);
1946 ret = type->selftest(type, tr);
1947 /* the test is responsible for resetting too */
1948 tr->current_trace = saved_tracer;
1949 if (ret) {
1950 printk(KERN_CONT "FAILED!\n");
1951 /* Add the warning after printing 'FAILED' */
1952 WARN_ON(1);
1953 return -1;
1954 }
1955 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001956 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001957
1958#ifdef CONFIG_TRACER_MAX_TRACE
1959 if (type->use_max_tr) {
1960 tr->allocated_snapshot = false;
1961
1962 /* Shrink the max buffer again */
1963 if (ring_buffer_expanded)
1964 ring_buffer_resize(tr->max_buffer.buffer, 1,
1965 RING_BUFFER_ALL_CPUS);
1966 }
1967#endif
1968
1969 printk(KERN_CONT "PASSED\n");
1970 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001971}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001972
1973static __init int init_trace_selftests(void)
1974{
1975 struct trace_selftests *p, *n;
1976 struct tracer *t, **last;
1977 int ret;
1978
1979 selftests_can_run = true;
1980
1981 mutex_lock(&trace_types_lock);
1982
1983 if (list_empty(&postponed_selftests))
1984 goto out;
1985
1986 pr_info("Running postponed tracer tests:\n");
1987
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001988 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001989 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001990 /* This loop can take minutes when sanitizers are enabled, so
1991 * let's make sure we allow RCU processing.
1992 */
1993 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001994 ret = run_tracer_selftest(p->type);
1995 /* If the test fails, then warn and remove from available_tracers */
1996 if (ret < 0) {
1997 WARN(1, "tracer: %s failed selftest, disabling\n",
1998 p->type->name);
1999 last = &trace_types;
2000 for (t = trace_types; t; t = t->next) {
2001 if (t == p->type) {
2002 *last = t->next;
2003 break;
2004 }
2005 last = &t->next;
2006 }
2007 }
2008 list_del(&p->list);
2009 kfree(p);
2010 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002011 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002012
2013 out:
2014 mutex_unlock(&trace_types_lock);
2015
2016 return 0;
2017}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002018core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002019#else
2020static inline int run_tracer_selftest(struct tracer *type)
2021{
2022 return 0;
2023}
2024#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002025
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002026static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2027
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002028static void __init apply_trace_boot_options(void);
2029
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002030/**
2031 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002032 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002033 *
2034 * Register a new plugin tracer.
2035 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002036int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002037{
2038 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002039 int ret = 0;
2040
2041 if (!type->name) {
2042 pr_info("Tracer must have a name\n");
2043 return -1;
2044 }
2045
Dan Carpenter24a461d2010-07-10 12:06:44 +02002046 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002047 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2048 return -1;
2049 }
2050
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002051 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002052 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002053 type->name);
2054 return -EPERM;
2055 }
2056
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002057 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002058
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002059 tracing_selftest_running = true;
2060
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002061 for (t = trace_types; t; t = t->next) {
2062 if (strcmp(type->name, t->name) == 0) {
2063 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002064 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002065 type->name);
2066 ret = -1;
2067 goto out;
2068 }
2069 }
2070
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002071 if (!type->set_flag)
2072 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002073 if (!type->flags) {
2074		/* allocate a dummy tracer_flags */
2075 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002076 if (!type->flags) {
2077 ret = -ENOMEM;
2078 goto out;
2079 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002080 type->flags->val = 0;
2081 type->flags->opts = dummy_tracer_opt;
2082 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002083 if (!type->flags->opts)
2084 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002085
Chunyu Hud39cdd22016-03-08 21:37:01 +08002086 /* store the tracer for __set_tracer_option */
2087 type->flags->trace = type;
2088
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002089 ret = run_tracer_selftest(type);
2090 if (ret < 0)
2091 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002092
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002093 type->next = trace_types;
2094 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002095 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002096
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002097 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002098 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002099 mutex_unlock(&trace_types_lock);
2100
Steven Rostedtdac74942009-02-05 01:13:38 -05002101 if (ret || !default_bootup_tracer)
2102 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002103
Li Zefanee6c2c12009-09-18 14:06:47 +08002104 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002105 goto out_unlock;
2106
2107 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2108 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002109 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002110 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002111
2112 apply_trace_boot_options();
2113
Steven Rostedtdac74942009-02-05 01:13:38 -05002114 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05002115 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05002116#ifdef CONFIG_FTRACE_STARTUP_TEST
2117 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
2118 type->name);
2119#endif
2120
2121 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002122 return ret;
2123}
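
/*
 * Illustrative sketch of a minimal registration, assuming hypothetical
 * my_tracer_init()/my_tracer_reset() callbacks (register_tracer() is
 * __init, so this must run at boot time):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 */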
2124
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002125static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002126{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002127 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002128
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002129 if (!buffer)
2130 return;
2131
Steven Rostedtf6339032009-09-04 12:35:16 -04002132 ring_buffer_record_disable(buffer);
2133
2134 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002135 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002136 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002137
2138 ring_buffer_record_enable(buffer);
2139}
2140
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002141void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002142{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002143 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002144
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002145 if (!buffer)
2146 return;
2147
Steven Rostedt621968c2009-09-04 12:02:35 -04002148 ring_buffer_record_disable(buffer);
2149
2150 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002151 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002152
Alexander Z Lam94571582013-08-02 18:36:16 -07002153 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002154
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002155 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002156
2157 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002158}
2159
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002160/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002161void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002162{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002163 struct trace_array *tr;
2164
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002165 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002166 if (!tr->clear_trace)
2167 continue;
2168 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002169 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002170#ifdef CONFIG_TRACER_MAX_TRACE
2171 tracing_reset_online_cpus(&tr->max_buffer);
2172#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002173 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002174}
2175
Joel Fernandesd914ba32017-06-26 19:01:55 -07002176static int *tgid_map;
2177
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002178#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002179#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002180static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002181struct saved_cmdlines_buffer {
2182 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2183 unsigned *map_cmdline_to_pid;
2184 unsigned cmdline_num;
2185 int cmdline_idx;
2186 char *saved_cmdlines;
2187};
2188static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002189
Steven Rostedt25b0b442008-05-12 21:21:00 +02002190/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002191static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002192
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002193static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002194{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002195 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2196}
2197
2198static inline void set_cmdline(int idx, const char *cmdline)
2199{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002200 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002201}
2202
2203static int allocate_cmdlines_buffer(unsigned int val,
2204 struct saved_cmdlines_buffer *s)
2205{
Kees Cook6da2ec52018-06-12 13:55:00 -07002206 s->map_cmdline_to_pid = kmalloc_array(val,
2207 sizeof(*s->map_cmdline_to_pid),
2208 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002209 if (!s->map_cmdline_to_pid)
2210 return -ENOMEM;
2211
Kees Cook6da2ec52018-06-12 13:55:00 -07002212 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002213 if (!s->saved_cmdlines) {
2214 kfree(s->map_cmdline_to_pid);
2215 return -ENOMEM;
2216 }
2217
2218 s->cmdline_idx = 0;
2219 s->cmdline_num = val;
2220 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2221 sizeof(s->map_pid_to_cmdline));
2222 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2223 val * sizeof(*s->map_cmdline_to_pid));
2224
2225 return 0;
2226}
2227
2228static int trace_create_savedcmd(void)
2229{
2230 int ret;
2231
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002232 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002233 if (!savedcmd)
2234 return -ENOMEM;
2235
2236 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2237 if (ret < 0) {
2238 kfree(savedcmd);
2239 savedcmd = NULL;
2240 return -ENOMEM;
2241 }
2242
2243 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244}
2245
Carsten Emdeb5130b12009-09-13 01:43:07 +02002246int is_tracing_stopped(void)
2247{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002248 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002249}
2250
Steven Rostedt0f048702008-11-05 16:05:44 -05002251/**
2252 * tracing_start - quick start of the tracer
2253 *
2254 * If tracing is enabled but was stopped by tracing_stop,
2255 * this will start the tracer back up.
2256 */
2257void tracing_start(void)
2258{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002259 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002260 unsigned long flags;
2261
2262 if (tracing_disabled)
2263 return;
2264
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002265 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2266 if (--global_trace.stop_count) {
2267 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002268 /* Someone screwed up their debugging */
2269 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002270 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002271 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002272 goto out;
2273 }
2274
Steven Rostedta2f80712010-03-12 19:56:00 -05002275 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002276 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002277
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002278 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002279 if (buffer)
2280 ring_buffer_record_enable(buffer);
2281
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002282#ifdef CONFIG_TRACER_MAX_TRACE
2283 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002284 if (buffer)
2285 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002286#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002287
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002288 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002289
Steven Rostedt0f048702008-11-05 16:05:44 -05002290 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002291 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2292}
2293
2294static void tracing_start_tr(struct trace_array *tr)
2295{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002296 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002297 unsigned long flags;
2298
2299 if (tracing_disabled)
2300 return;
2301
2302 /* If global, we need to also start the max tracer */
2303 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2304 return tracing_start();
2305
2306 raw_spin_lock_irqsave(&tr->start_lock, flags);
2307
2308 if (--tr->stop_count) {
2309 if (tr->stop_count < 0) {
2310 /* Someone screwed up their debugging */
2311 WARN_ON_ONCE(1);
2312 tr->stop_count = 0;
2313 }
2314 goto out;
2315 }
2316
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002317 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002318 if (buffer)
2319 ring_buffer_record_enable(buffer);
2320
2321 out:
2322 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002323}
2324
2325/**
2326 * tracing_stop - quick stop of the tracer
2327 *
2328 * Light weight way to stop tracing. Use in conjunction with
2329 * tracing_start.
2330 */
2331void tracing_stop(void)
2332{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002333 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002334 unsigned long flags;
2335
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002336 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2337 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002338 goto out;
2339
Steven Rostedta2f80712010-03-12 19:56:00 -05002340 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002341 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002342
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002343 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002344 if (buffer)
2345 ring_buffer_record_disable(buffer);
2346
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002347#ifdef CONFIG_TRACER_MAX_TRACE
2348 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002349 if (buffer)
2350 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002351#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002352
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002353 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002354
Steven Rostedt0f048702008-11-05 16:05:44 -05002355 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002356 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2357}
2358
2359static void tracing_stop_tr(struct trace_array *tr)
2360{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002361 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002362 unsigned long flags;
2363
2364 /* If global, we need to also stop the max tracer */
2365 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2366 return tracing_stop();
2367
2368 raw_spin_lock_irqsave(&tr->start_lock, flags);
2369 if (tr->stop_count++)
2370 goto out;
2371
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002372 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002373 if (buffer)
2374 ring_buffer_record_disable(buffer);
2375
2376 out:
2377 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002378}
2379
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002380static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002381{
Carsten Emdea635cf02009-03-18 09:00:41 +01002382 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002383
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002384 /* treat recording of idle task as a success */
2385 if (!tsk->pid)
2386 return 1;
2387
2388 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002389 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390
2391 /*
2392 * It's not the end of the world if we don't get
2393 * the lock, but we also don't want to spin
2394 * nor do we want to disable interrupts,
2395 * so if we miss here, then better luck next time.
2396 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002397 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002398 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002399
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002400 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002401 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002402 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403
Carsten Emdea635cf02009-03-18 09:00:41 +01002404 /*
2405 * Check whether the cmdline buffer at idx has a pid
2406 * mapped. We are going to overwrite that entry so we
2407 * need to clear the map_pid_to_cmdline. Otherwise we
2408 * would read the new comm for the old pid.
2409 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002410 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002411 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002412 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002414 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2415 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002416
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002417 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 }
2419
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002420 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002422 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002423
2424 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425}
2426
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002427static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429 unsigned map;
2430
Steven Rostedt4ca530852009-03-16 19:20:15 -04002431 if (!pid) {
2432 strcpy(comm, "<idle>");
2433 return;
2434 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435
Steven Rostedt74bf4072010-01-25 15:11:53 -05002436 if (WARN_ON_ONCE(pid < 0)) {
2437 strcpy(comm, "<XXX>");
2438 return;
2439 }
2440
Steven Rostedt4ca530852009-03-16 19:20:15 -04002441 if (pid > PID_MAX_DEFAULT) {
2442 strcpy(comm, "<...>");
2443 return;
2444 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002445
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002446 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002447 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302448 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002449 else
2450 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002451}
2452
2453void trace_find_cmdline(int pid, char comm[])
2454{
2455 preempt_disable();
2456 arch_spin_lock(&trace_cmdline_lock);
2457
2458 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002460 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002461 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002462}
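
/*
 * Illustrative use from an output path, assuming entry points at a
 * recorded struct trace_entry and s is the output trace_seq:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */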
2463
Joel Fernandesd914ba32017-06-26 19:01:55 -07002464int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002466 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2467 return 0;
2468
2469 return tgid_map[pid];
2470}
2471
2472static int trace_save_tgid(struct task_struct *tsk)
2473{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002474 /* treat recording of idle task as a success */
2475 if (!tsk->pid)
2476 return 1;
2477
2478 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002479 return 0;
2480
2481 tgid_map[tsk->pid] = tsk->tgid;
2482 return 1;
2483}
2484
2485static bool tracing_record_taskinfo_skip(int flags)
2486{
2487 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2488 return true;
2489 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2490 return true;
2491 if (!__this_cpu_read(trace_taskinfo_save))
2492 return true;
2493 return false;
2494}
2495
2496/**
2497 * tracing_record_taskinfo - record the task info of a task
2498 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002499 * @task: task to record
2500 * @flags: TRACE_RECORD_CMDLINE for recording comm
2501 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002502 */
2503void tracing_record_taskinfo(struct task_struct *task, int flags)
2504{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002505 bool done;
2506
Joel Fernandesd914ba32017-06-26 19:01:55 -07002507 if (tracing_record_taskinfo_skip(flags))
2508 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002509
2510 /*
2511 * Record as much task information as possible. If some fail, continue
2512 * to try to record the others.
2513 */
2514 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2515 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2516
2517 /* If recording any information failed, retry again soon. */
2518 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519 return;
2520
Joel Fernandesd914ba32017-06-26 19:01:55 -07002521 __this_cpu_write(trace_taskinfo_save, false);
2522}
2523
2524/**
2525 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2526 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002527 * @prev: previous task during sched_switch
2528 * @next: next task during sched_switch
2529 * @flags: TRACE_RECORD_CMDLINE for recording comm
2530 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002531 */
2532void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2533 struct task_struct *next, int flags)
2534{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002535 bool done;
2536
Joel Fernandesd914ba32017-06-26 19:01:55 -07002537 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002538 return;
2539
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002540 /*
2541 * Record as much task information as possible. If some fail, continue
2542 * to try to record the others.
2543 */
2544 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2545 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2546 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2547 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002548
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002549 /* If recording any information failed, retry again soon. */
2550 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002551 return;
2552
2553 __this_cpu_write(trace_taskinfo_save, false);
2554}
2555
2556/* Helpers to record a specific task information */
2557void tracing_record_cmdline(struct task_struct *task)
2558{
2559 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2560}
2561
2562void tracing_record_tgid(struct task_struct *task)
2563{
2564 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002565}
2566
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002567/*
2568 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2569 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2570 * simplifies those functions and keeps them in sync.
2571 */
2572enum print_line_t trace_handle_return(struct trace_seq *s)
2573{
2574 return trace_seq_has_overflowed(s) ?
2575 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2576}
2577EXPORT_SYMBOL_GPL(trace_handle_return);
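
/*
 * Illustrative use in an event's output callback (hypothetical names):
 * write to the trace_seq and let the helper above map any overflow to
 * the right return value.
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */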
2578
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002579void
Cong Wang46710f32019-05-25 09:57:59 -07002580tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2581 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582{
2583 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584
Steven Rostedt777e2082008-09-29 23:02:42 -04002585 entry->preempt_count = pc & 0xff;
2586 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002587 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002588 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002589#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002590 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002591#else
2592 TRACE_FLAG_IRQS_NOSUPPORT |
2593#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002594 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002595 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302596 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002597 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2598 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002600EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002601
Steven Rostedte77405a2009-09-02 14:17:06 -04002602struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002603trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002604 int type,
2605 unsigned long len,
2606 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002607{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002608 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002609}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002610
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002611DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2612DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2613static int trace_buffered_event_ref;
2614
2615/**
2616 * trace_buffered_event_enable - enable buffering events
2617 *
2618 * When events are being filtered, it is quicker to use a temporary
2619 * buffer to write the event data into if there's a likely chance
2620 * that it will not be committed. Discarding an event from the ring
2621 * buffer is not as fast as committing one, and is much slower than
2622 * copying the data and then committing it.
2623 *
2624 * When an event is to be filtered, allocate per-CPU buffers to
2625 * write the event data into. If the event is then filtered and
2626 * discarded, it is simply dropped; otherwise, the entire data is
2627 * committed in one shot.
2628 */
2629void trace_buffered_event_enable(void)
2630{
2631 struct ring_buffer_event *event;
2632 struct page *page;
2633 int cpu;
2634
2635 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2636
2637 if (trace_buffered_event_ref++)
2638 return;
2639
2640 for_each_tracing_cpu(cpu) {
2641 page = alloc_pages_node(cpu_to_node(cpu),
2642 GFP_KERNEL | __GFP_NORETRY, 0);
2643 if (!page)
2644 goto failed;
2645
2646 event = page_address(page);
2647 memset(event, 0, sizeof(*event));
2648
2649 per_cpu(trace_buffered_event, cpu) = event;
2650
2651 preempt_disable();
2652 if (cpu == smp_processor_id() &&
2653 this_cpu_read(trace_buffered_event) !=
2654 per_cpu(trace_buffered_event, cpu))
2655 WARN_ON_ONCE(1);
2656 preempt_enable();
2657 }
2658
2659 return;
2660 failed:
2661 trace_buffered_event_disable();
2662}
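
/*
 * Pairing sketch (editor's illustration): callers hold event_mutex and
 * bracket a filter's lifetime with enable/disable, e.g.:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... install the filter ...
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable(), again under
 * event_mutex, when the filter is removed.
 */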
2663
2664static void enable_trace_buffered_event(void *data)
2665{
2666	/* Pairs with the smp_wmb() in trace_buffered_event_disable(); probably not needed, but do it anyway */
2667 smp_rmb();
2668 this_cpu_dec(trace_buffered_event_cnt);
2669}
2670
2671static void disable_trace_buffered_event(void *data)
2672{
2673 this_cpu_inc(trace_buffered_event_cnt);
2674}
2675
2676/**
2677 * trace_buffered_event_disable - disable buffering events
2678 *
2679 * When a filter is removed, it is faster to not use the buffered
2680 * events, and to commit directly into the ring buffer. Free up
2681 * the temp buffers when there are no more users. This requires
2682 * special synchronization with current events.
2683 */
2684void trace_buffered_event_disable(void)
2685{
2686 int cpu;
2687
2688 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2689
2690 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2691 return;
2692
2693 if (--trace_buffered_event_ref)
2694 return;
2695
2696 preempt_disable();
2697 /* For each CPU, set the buffer as used. */
2698 smp_call_function_many(tracing_buffer_mask,
2699 disable_trace_buffered_event, NULL, 1);
2700 preempt_enable();
2701
2702 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002703 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002704
2705 for_each_tracing_cpu(cpu) {
2706 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2707 per_cpu(trace_buffered_event, cpu) = NULL;
2708 }
2709 /*
2710 * Make sure trace_buffered_event is NULL before clearing
2711 * trace_buffered_event_cnt.
2712 */
2713 smp_wmb();
2714
2715 preempt_disable();
2716 /* Do the work on each cpu */
2717 smp_call_function_many(tracing_buffer_mask,
2718 enable_trace_buffered_event, NULL, 1);
2719 preempt_enable();
2720}
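
/*
 * Editor's note on the ordering above: raising every CPU's
 * trace_buffered_event_cnt first makes new writers fall back to a
 * normal ring buffer reservation, synchronize_rcu() waits out writers
 * already in the preempt-disabled fast path, and the smp_wmb() (paired
 * with the smp_rmb() in enable_trace_buffered_event()) makes sure the
 * NULLed pointers are seen before the counts drop back to zero.
 */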
2721
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002722static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002723
Steven Rostedtef5580d2009-02-27 19:38:04 -05002724struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002725trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002726 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002727 int type, unsigned long len,
2728 unsigned long flags, int pc)
2729{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002730 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002731 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002732
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002733 *current_rb = trace_file->tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002734
Tom Zanussi00b41452018-01-15 20:51:39 -06002735 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002736 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2737 (entry = this_cpu_read(trace_buffered_event))) {
2738 /* Try to use the per cpu buffer first */
2739 val = this_cpu_inc_return(trace_buffered_event_cnt);
2740 if (val == 1) {
2741 trace_event_setup(entry, type, flags, pc);
2742 entry->array[0] = len;
2743 return entry;
2744 }
2745 this_cpu_dec(trace_buffered_event_cnt);
2746 }
2747
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002748 entry = __trace_buffer_lock_reserve(*current_rb,
2749 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002750 /*
2751	 * If tracing is off, but we have triggers enabled,
2752	 * we still need to look at the event data. Use the temp_buffer
2753	 * to store the trace event for the trigger to use. It's recursion
2754	 * safe and will not be recorded anywhere.
2755 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002756 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002757 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002758 entry = __trace_buffer_lock_reserve(*current_rb,
2759 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002760 }
2761 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002762}
2763EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
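
/*
 * Editor's note: the this_cpu_inc_return() == 1 test above means only
 * the outermost (non-nested) event on a CPU may use the per-CPU
 * buffered-event page; nested events fall through to a regular ring
 * buffer reservation.
 */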
2764
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002765static DEFINE_SPINLOCK(tracepoint_iter_lock);
2766static DEFINE_MUTEX(tracepoint_printk_mutex);
2767
2768static void output_printk(struct trace_event_buffer *fbuffer)
2769{
2770 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002771 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002772 struct trace_event *event;
2773 unsigned long flags;
2774 struct trace_iterator *iter = tracepoint_print_iter;
2775
2776 /* We should never get here if iter is NULL */
2777 if (WARN_ON_ONCE(!iter))
2778 return;
2779
2780 event_call = fbuffer->trace_file->event_call;
2781 if (!event_call || !event_call->event.funcs ||
2782 !event_call->event.funcs->trace)
2783 return;
2784
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002785 file = fbuffer->trace_file;
2786 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2787 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2788 !filter_match_preds(file->filter, fbuffer->entry)))
2789 return;
2790
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002791 event = &fbuffer->trace_file->event_call->event;
2792
2793 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2794 trace_seq_init(&iter->seq);
2795 iter->ent = fbuffer->entry;
2796 event_call->event.funcs->trace(iter, 0, event);
2797 trace_seq_putc(&iter->seq, 0);
2798 printk("%s", iter->seq.buffer);
2799
2800 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2801}
2802
2803int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002804 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002805 loff_t *ppos)
2806{
2807 int save_tracepoint_printk;
2808 int ret;
2809
2810 mutex_lock(&tracepoint_printk_mutex);
2811 save_tracepoint_printk = tracepoint_printk;
2812
2813 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2814
2815 /*
2816 * This will force exiting early, as tracepoint_printk
2817 * is always zero when tracepoint_printk_iter is not allocated
2818 */
2819 if (!tracepoint_print_iter)
2820 tracepoint_printk = 0;
2821
2822 if (save_tracepoint_printk == tracepoint_printk)
2823 goto out;
2824
2825 if (tracepoint_printk)
2826 static_key_enable(&tracepoint_printk_key.key);
2827 else
2828 static_key_disable(&tracepoint_printk_key.key);
2829
2830 out:
2831 mutex_unlock(&tracepoint_printk_mutex);
2832
2833 return ret;
2834}
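
/*
 * Hooked up via the kernel sysctl table; the real entry lives in
 * kernel/sysctl.c, sketched here for reference:
 *
 *	{
 *		.procname	= "tracepoint_printk",
 *		.data		= &tracepoint_printk,
 *		.maxlen		= sizeof(tracepoint_printk),
 *		.mode		= 0644,
 *		.proc_handler	= tracepoint_printk_sysctl,
 *	},
 */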
2835
2836void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2837{
2838 if (static_key_false(&tracepoint_printk_key.key))
2839 output_printk(fbuffer);
2840
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002841 if (static_branch_unlikely(&trace_event_exports_enabled))
2842 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002843 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002844 fbuffer->event, fbuffer->entry,
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002845 fbuffer->flags, fbuffer->pc, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002846}
2847EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2848
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002849/*
2850 * Skip 3:
2851 *
2852 * trace_buffer_unlock_commit_regs()
2853 * trace_event_buffer_commit()
2854 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302855 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002856# define STACK_SKIP 3
2857
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002858void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002859 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002860 struct ring_buffer_event *event,
2861 unsigned long flags, int pc,
2862 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002863{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002864 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002865
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002866 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002867 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002868 * Note, we can still get here via blktrace, wakeup tracer
2869 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002870 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002871 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002872 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002873 ftrace_trace_userstack(buffer, flags, pc);
2874}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002875
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002876/*
2877 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2878 */
2879void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002880trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002881 struct ring_buffer_event *event)
2882{
2883 __buffer_unlock_commit(buffer, event);
2884}
2885
Ingo Molnare309b412008-05-12 21:20:51 +02002886void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002887trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002888 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2889 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002890{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002891 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002892 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002893 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002894 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002895
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002896 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2897 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002898 if (!event)
2899 return;
2900 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002901 entry->ip = ip;
2902 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002903
Chunyan Zhang478409d2016-11-21 15:57:18 +08002904 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002905 if (static_branch_unlikely(&trace_function_exports_enabled))
2906 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002907 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002908 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002909}
2910
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002911#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002912
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002913/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2914#define FTRACE_KSTACK_NESTING 4
2915
2916#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2917
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002918struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002919 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002920};
2921
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002922
2923struct ftrace_stacks {
2924 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2925};
2926
2927static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002928static DEFINE_PER_CPU(int, ftrace_stack_reserve);
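
/*
 * Editor's note: ftrace_stack_reserve acts as a per-CPU nesting depth.
 * A stack trace in normal context uses stacks[0]; a softirq, irq or
 * NMI that interrupts while one is being recorded gets stacks[1],
 * stacks[2], ..., up to FTRACE_KSTACK_NESTING levels deep.
 */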
2929
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002930static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002931 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002932 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002933{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002934 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002935 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002936 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002937 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002938 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002939 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002940
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002941 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002942	 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002943 * If regs is set, then these functions will not be in the way.
2944 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002945#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002946 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002947 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002948#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002949
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002950 preempt_disable_notrace();
2951
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002952 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2953
2954 /* This should never happen. If it does, yell once and skip */
2955	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2956 goto out;
2957
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002958 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002959 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2960	 * interrupt will either see the value pre-increment or
2961	 * post-increment. If the interrupt happens pre-increment, it
2962	 * will have restored the counter when it returns. We just need
2963	 * a barrier to keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002964 */
2965 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002966
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002967 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002968 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002969
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002970 if (regs) {
2971 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2972 size, skip);
2973 } else {
2974 nr_entries = stack_trace_save(fstack->calls, size, skip);
2975 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002976
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002977 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002978 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2979 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002980 if (!event)
2981 goto out;
2982 entry = ring_buffer_event_data(event);
2983
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002984 memcpy(&entry->caller, fstack->calls, size);
2985 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002986
Tom Zanussif306cc82013-10-24 08:34:17 -05002987 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002988 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002989
2990 out:
2991 /* Again, don't let gcc optimize things here */
2992 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002993 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002994 preempt_enable_notrace();
2995
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002996}
2997
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002998static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002999 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003000 unsigned long flags,
3001 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003002{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003003 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003004 return;
3005
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003006 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003007}
3008
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003009void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3010 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04003011{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003012 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003013
3014 if (rcu_is_watching()) {
3015 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3016 return;
3017 }
3018
3019 /*
3020 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3021 * but if the above rcu_is_watching() failed, then the NMI
3022 * triggered someplace critical, and rcu_irq_enter() should
3023 * not be called from NMI.
3024 */
3025 if (unlikely(in_nmi()))
3026 return;
3027
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003028 rcu_irq_enter_irqson();
3029 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3030 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003031}
3032
Steven Rostedt03889382009-12-11 09:48:22 -05003033/**
3034 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003035 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003036 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003037void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003038{
3039 unsigned long flags;
3040
3041 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003042 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003043
3044 local_save_flags(flags);
3045
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003046#ifndef CONFIG_UNWINDER_ORC
3047 /* Skip 1 to skip this function. */
3048 skip++;
3049#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003050 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003051 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003052}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003053EXPORT_SYMBOL_GPL(trace_dump_stack);
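
/*
 * Usage sketch (editor's illustration): record the current backtrace
 * into the top level trace buffer from anywhere in the kernel:
 *
 *	trace_dump_stack(0);
 */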
Steven Rostedt03889382009-12-11 09:48:22 -05003054
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003055#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003056static DEFINE_PER_CPU(int, user_stack_count);
3057
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003058static void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003059ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003060{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003061 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003062 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003063 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003064
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003065 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003066 return;
3067
Steven Rostedtb6345872010-03-12 20:03:30 -05003068 /*
3069	 * NMIs cannot handle page faults, even with fixups.
3070	 * Saving the user stack can (and often does) fault.
3071 */
3072 if (unlikely(in_nmi()))
3073 return;
3074
Steven Rostedt91e86e52010-11-10 12:56:12 +01003075 /*
3076	 * Prevent recursion, since the user stack tracing may
3077 * trigger other kernel events.
3078 */
3079 preempt_disable();
3080 if (__this_cpu_read(user_stack_count))
3081 goto out;
3082
3083 __this_cpu_inc(user_stack_count);
3084
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003085 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3086 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003087 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003088 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003089 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003090
Steven Rostedt48659d32009-09-11 11:36:23 -04003091 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003092 memset(&entry->caller, 0, sizeof(entry->caller));
3093
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003094 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003095 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003096 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003097
Li Zefan1dbd1952010-12-09 15:47:56 +08003098 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003099 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003100 out:
3101 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003102}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003103#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003104static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003105 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003106{
Török Edwin02b67512008-11-22 13:28:47 +02003107}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003108#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003109
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003110#endif /* CONFIG_STACKTRACE */
3111
Steven Rostedt07d777f2011-09-22 14:01:55 -04003112/* created for use with alloc_percpu */
3113struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003114 int nesting;
3115 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003116};
3117
3118static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003119
3120/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003121 * This allows for lockless recording. If we're nested too deeply, then
3122 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003123 */
3124static char *get_trace_buf(void)
3125{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003126 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003127
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003128 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003129 return NULL;
3130
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003131 buffer->nesting++;
3132
3133 /* Interrupts must see nesting incremented before we use the buffer */
3134 barrier();
3135 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003136}
3137
3138static void put_trace_buf(void)
3139{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003140 /* Don't let the decrement of nesting leak before this */
3141 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003142 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003143}
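
/*
 * Pairing sketch (editor's illustration): a successful get_trace_buf()
 * must be matched by put_trace_buf(), with preemption disabled across
 * the pair as the buffer is per-CPU:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format up to TRACE_BUF_SIZE bytes into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */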
3144
3145static int alloc_percpu_trace_buffer(void)
3146{
3147 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003148
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003149 if (trace_percpu_buffer)
3150 return 0;
3151
Steven Rostedt07d777f2011-09-22 14:01:55 -04003152 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003153 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003154 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003155
3156 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003157 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003158}
3159
Steven Rostedt81698832012-10-11 10:15:05 -04003160static int buffers_allocated;
3161
Steven Rostedt07d777f2011-09-22 14:01:55 -04003162void trace_printk_init_buffers(void)
3163{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003164 if (buffers_allocated)
3165 return;
3166
3167 if (alloc_percpu_trace_buffer())
3168 return;
3169
Steven Rostedt2184db42014-05-28 13:14:40 -04003170 /* trace_printk() is for debug use only. Don't use it in production. */
3171
Joe Perchesa395d6a2016-03-22 14:28:09 -07003172 pr_warn("\n");
3173 pr_warn("**********************************************************\n");
3174 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3175 pr_warn("** **\n");
3176 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3177 pr_warn("** **\n");
3178 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3179 pr_warn("** unsafe for production use. **\n");
3180 pr_warn("** **\n");
3181 pr_warn("** If you see this message and you are not debugging **\n");
3182 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3183 pr_warn("** **\n");
3184 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3185 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003186
Steven Rostedtb382ede62012-10-10 21:44:34 -04003187	/* Expand the buffers to the set size */
3188 tracing_update_buffers();
3189
Steven Rostedt07d777f2011-09-22 14:01:55 -04003190 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003191
3192 /*
3193 * trace_printk_init_buffers() can be called by modules.
3194 * If that happens, then we need to start cmdline recording
3195	 * directly here. If the global_trace.array_buffer.buffer is
3196	 * already allocated here, then this was called by module code.
3197 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003198 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003199 tracing_start_cmdline_record();
3200}
Divya Indif45d1222019-03-20 11:28:51 -07003201EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003202
3203void trace_printk_start_comm(void)
3204{
3205 /* Start tracing comms if trace printk is set */
3206 if (!buffers_allocated)
3207 return;
3208 tracing_start_cmdline_record();
3209}
3210
3211static void trace_printk_start_stop_comm(int enabled)
3212{
3213 if (!buffers_allocated)
3214 return;
3215
3216 if (enabled)
3217 tracing_start_cmdline_record();
3218 else
3219 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003220}
3221
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003222/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003223 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003224 * @ip: The address of the caller
3225 * @fmt: The string format to write to the buffer
3226 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003227 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003228int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003229{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003230 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003231 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003232 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003233 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003234 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003235 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003236 char *tbuffer;
3237 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238
3239 if (unlikely(tracing_selftest_running || tracing_disabled))
3240 return 0;
3241
3242 /* Don't pollute graph traces with trace_vprintk internals */
3243 pause_graph_tracing();
3244
3245 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003246 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003247
Steven Rostedt07d777f2011-09-22 14:01:55 -04003248 tbuffer = get_trace_buf();
3249 if (!tbuffer) {
3250 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003251 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003252 }
3253
3254 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3255
3256 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003257 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003258
Steven Rostedt07d777f2011-09-22 14:01:55 -04003259 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003260 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003261 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003262 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003263 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3264 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003265 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003266 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003267 entry = ring_buffer_event_data(event);
3268 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003269 entry->fmt = fmt;
3270
Steven Rostedt07d777f2011-09-22 14:01:55 -04003271 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003272 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003273 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003274 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003275 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003276
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003277out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003278 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003279out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003280 put_trace_buf();
3281
3282out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003283 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003284 unpause_graph_tracing();
3285
3286 return len;
3287}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003288EXPORT_SYMBOL_GPL(trace_vbprintk);
3289
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003290__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003291static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003292__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003293 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003294{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003295 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003296 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003297 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003298 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003299 unsigned long flags;
3300 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003301
3302 if (tracing_disabled || tracing_selftest_running)
3303 return 0;
3304
Steven Rostedt07d777f2011-09-22 14:01:55 -04003305 /* Don't pollute graph traces with trace_vprintk internals */
3306 pause_graph_tracing();
3307
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003308 pc = preempt_count();
3309 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003310
Steven Rostedt07d777f2011-09-22 14:01:55 -04003311
3312 tbuffer = get_trace_buf();
3313 if (!tbuffer) {
3314 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003315 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003316 }
3317
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003318 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003319
Steven Rostedt07d777f2011-09-22 14:01:55 -04003320 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003321 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003322 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003323 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3324 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003325 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003326 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003327 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003328 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003329
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003330 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003331 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003332 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003333 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003334 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003335
3336out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003337 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003338 put_trace_buf();
3339
3340out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003341 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003342 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003343
3344 return len;
3345}
Steven Rostedt659372d2009-09-03 19:11:07 -04003346
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003347__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003348int trace_array_vprintk(struct trace_array *tr,
3349 unsigned long ip, const char *fmt, va_list args)
3350{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003351 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003352}
3353
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003354/**
3355 * trace_array_printk - Print a message to a specific instance
3356 * @tr: The instance trace_array descriptor
3357 * @ip: The instruction pointer that this is called from.
3358 * @fmt: The format to print (printf format)
3359 *
3360 * If a subsystem sets up its own instance, it has the right to
3361 * printk strings into its tracing instance buffer using this
3362 * function. Note, this function will not write into the top level
3363 * buffer (use trace_printk() for that), as writing into the top level
3364 * buffer should only have events that can be individually disabled.
3365 * trace_printk() is only used for debugging a kernel, and should
3366 * never be incorporated in normal use.
3367 *
3368 * trace_array_printk() can be used, as it will not add noise to the
3369 * top level tracing buffer.
3370 *
3371 * Note, trace_array_init_printk() must be called on @tr before this
3372 * can be used.
3373 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003374__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003375int trace_array_printk(struct trace_array *tr,
3376 unsigned long ip, const char *fmt, ...)
3377{
3378 int ret;
3379 va_list ap;
3380
Divya Indi953ae452019-08-14 10:55:25 -07003381 if (!tr)
3382 return -ENOENT;
3383
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003384 /* This is only allowed for created instances */
3385 if (tr == &global_trace)
3386 return 0;
3387
3388 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3389 return 0;
3390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003391 va_start(ap, fmt);
3392 ret = trace_array_vprintk(tr, ip, fmt, ap);
3393 va_end(ap);
3394 return ret;
3395}
Divya Indif45d1222019-03-20 11:28:51 -07003396EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003397
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003398/**
3399 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3400 * @tr: The trace array to initialize the buffers for
3401 *
3402 * As trace_array_printk() only writes into instances, they are OK to
3403 * have in the kernel (unlike trace_printk()). This needs to be called
3404 * before trace_array_printk() can be used on a trace_array.
3405 */
3406int trace_array_init_printk(struct trace_array *tr)
3407{
3408 if (!tr)
3409 return -ENOENT;
3410
3411 /* This is only allowed for created instances */
3412 if (tr == &global_trace)
3413 return -EINVAL;
3414
3415 return alloc_percpu_trace_buffer();
3416}
3417EXPORT_SYMBOL_GPL(trace_array_init_printk);
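
/*
 * Combined usage sketch (editor's illustration; "myinst" is a made-up
 * instance name, and a matching trace_array_put() is needed when
 * done):
 *
 *	struct trace_array *tr = trace_array_get_by_name("myinst");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 */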
3418
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003419__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003420int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003421 unsigned long ip, const char *fmt, ...)
3422{
3423 int ret;
3424 va_list ap;
3425
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003426 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003427 return 0;
3428
3429 va_start(ap, fmt);
3430 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3431 va_end(ap);
3432 return ret;
3433}
3434
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003435__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003436int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3437{
Steven Rostedta813a152009-10-09 01:41:35 -04003438 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003439}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003440EXPORT_SYMBOL_GPL(trace_vprintk);
3441
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003442static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003443{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003444 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3445
Steven Rostedt5a90f572008-09-03 17:42:51 -04003446 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003447 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003448 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003449}
3450
Ingo Molnare309b412008-05-12 21:20:51 +02003451static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003452peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3453 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003454{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003455 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003456 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003457
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003458 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003459 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003460 if (lost_events)
3461 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3462 (unsigned long)-1 : 0;
3463 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003464 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003465 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003466 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003467
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003468 if (event) {
3469 iter->ent_size = ring_buffer_event_length(event);
3470 return ring_buffer_event_data(event);
3471 }
3472 iter->ent_size = 0;
3473 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003474}
Steven Rostedtd7690412008-10-01 00:29:53 -04003475
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003476static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003477__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3478 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003479{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003480 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003481 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003482 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003483 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003484 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003485 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003486 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487 int cpu;
3488
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003489 /*
3490	 * If we are in a per_cpu trace file, don't bother iterating over
3491	 * all cpus; just peek at the requested one directly.
3492 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003493 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003494 if (ring_buffer_empty_cpu(buffer, cpu_file))
3495 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003496 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003497 if (ent_cpu)
3498 *ent_cpu = cpu_file;
3499
3500 return ent;
3501 }
3502
Steven Rostedtab464282008-05-12 21:21:00 +02003503 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003504
3505 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003506 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003507
Steven Rostedtbc21b472010-03-31 19:49:26 -04003508 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003509
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003510 /*
3511 * Pick the entry with the smallest timestamp:
3512 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003513 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003514 next = ent;
3515 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003516 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003517 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003518 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003519 }
3520 }
3521
Steven Rostedt12b5da32012-03-27 10:43:28 -04003522 iter->ent_size = next_size;
3523
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003524 if (ent_cpu)
3525 *ent_cpu = next_cpu;
3526
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003527 if (ent_ts)
3528 *ent_ts = next_ts;
3529
Steven Rostedtbc21b472010-03-31 19:49:26 -04003530 if (missing_events)
3531 *missing_events = next_lost;
3532
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 return next;
3534}
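
/*
 * Editor's note: the loop above is effectively a k-way merge. Each
 * per-CPU buffer is already time ordered, so picking the minimum
 * timestamp across CPUs yields the globally oldest pending entry.
 */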
3535
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003536#define STATIC_TEMP_BUF_SIZE 128
3537static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3538
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003539/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003540struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3541 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003542{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003543 /* __find_next_entry will reset ent_size */
3544 int ent_size = iter->ent_size;
3545 struct trace_entry *entry;
3546
3547 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003548 * If called from ftrace_dump(), then the iter->temp buffer
3549 * will be the static_temp_buf and not created from kmalloc.
3550	 * If the entry size is greater than the buffer, we cannot
3551	 * save it. Just return NULL in that case. This is only
3552	 * used to add markers when two consecutive events' time
3553	 * stamps have a large delta. See trace_print_lat_context().
3554 */
3555 if (iter->temp == static_temp_buf &&
3556 STATIC_TEMP_BUF_SIZE < ent_size)
3557 return NULL;
3558
3559 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003560 * The __find_next_entry() may call peek_next_entry(), which may
3561 * call ring_buffer_peek() that may make the contents of iter->ent
3562 * undefined. Need to copy iter->ent now.
3563 */
3564 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003565 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3566 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003567 kfree(iter->temp);
3568 iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
3569 if (!iter->temp)
3570 return NULL;
3571 }
3572 memcpy(iter->temp, iter->ent, iter->ent_size);
3573 iter->temp_size = iter->ent_size;
3574 iter->ent = iter->temp;
3575 }
3576 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3577 /* Put back the original ent_size */
3578 iter->ent_size = ent_size;
3579
3580 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003581}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003582
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003583/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003584void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003585{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003586 iter->ent = __find_next_entry(iter, &iter->cpu,
3587 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003588
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003589 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003590 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003591
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003592 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003593}
3594
Ingo Molnare309b412008-05-12 21:20:51 +02003595static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003596{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003597 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003598 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003599}
3600
Ingo Molnare309b412008-05-12 21:20:51 +02003601static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003602{
3603 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003604 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003605 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003606
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003607 WARN_ON_ONCE(iter->leftover);
3608
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003609 (*pos)++;
3610
3611 /* can't go backwards */
3612 if (iter->idx > i)
3613 return NULL;
3614
3615 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003616 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003617 else
3618 ent = iter;
3619
3620 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003621 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003622
3623 iter->pos = *pos;
3624
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003625 return ent;
3626}
3627
Jason Wessel955b61e2010-08-05 09:22:23 -05003628void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003629{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003630 struct ring_buffer_iter *buf_iter;
3631 unsigned long entries = 0;
3632 u64 ts;
3633
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003634 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003635
Steven Rostedt6d158a82012-06-27 20:46:14 -04003636 buf_iter = trace_buffer_iter(iter, cpu);
3637 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003638 return;
3639
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003640 ring_buffer_iter_reset(buf_iter);
3641
3642 /*
3643	 * With the max latency tracers, we could have the case that
3644	 * a reset never took place on a cpu. This is evident by the
3645	 * timestamp being before the start of the buffer.
3646 */
YangHui69243722020-06-16 11:36:46 +08003647 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003648 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003649 break;
3650 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003651 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003652 }
3653
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003654 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003655}
3656
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003657/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003658 * The current tracer is copied to avoid taking a global lock
3659 * all around.
3660 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003661static void *s_start(struct seq_file *m, loff_t *pos)
3662{
3663 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003664 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003665 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003666 void *p = NULL;
3667 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003668 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003669
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003670 /*
3671 * copy the tracer to avoid using a global lock all around.
3672 * iter->trace is a copy of current_trace, the pointer to the
3673 * name may be used instead of a strcmp(), as iter->trace->name
3674 * will point to the same string as current_trace->name.
3675 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003676 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003677 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3678 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003679 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003680
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003681#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003682 if (iter->snapshot && iter->trace->use_max_tr)
3683 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003684#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003685
3686 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003687 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003688
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003689 if (*pos != iter->pos) {
3690 iter->ent = NULL;
3691 iter->cpu = 0;
3692 iter->idx = -1;
3693
Steven Rostedtae3b5092013-01-23 15:22:59 -05003694 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003695 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003696 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003697 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003698 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003699
Lai Jiangshanac91d852010-03-02 17:54:50 +08003700 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003701 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3702 ;
3703
3704 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003705 /*
3706 * If we overflowed the seq_file before, then we want
3707 * to just reuse the trace_seq buffer again.
3708 */
3709 if (iter->leftover)
3710 p = iter;
3711 else {
3712 l = *pos - 1;
3713 p = s_next(m, p, &l);
3714 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003715 }
3716
Lai Jiangshan4f535962009-05-18 19:35:34 +08003717 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003718 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003719 return p;
3720}
3721
3722static void s_stop(struct seq_file *m, void *p)
3723{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003724 struct trace_iterator *iter = m->private;
3725
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003726#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003727 if (iter->snapshot && iter->trace->use_max_tr)
3728 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003729#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003730
3731 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003732 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003733
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003734 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003735 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003736}
3737
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003738static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003739get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003740 unsigned long *entries, int cpu)
3741{
3742 unsigned long count;
3743
3744 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3745 /*
3746 * If this buffer has skipped entries, then we hold all
3747 * entries for the trace and we need to ignore the
3748 * ones before the time stamp.
3749 */
3750 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3751 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3752 /* total is the same as the entries */
3753 *total = count;
3754 } else
3755 *total = count +
3756 ring_buffer_overrun_cpu(buf->buffer, cpu);
3757 *entries = count;
3758}
3759
3760static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003761get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003762 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003763{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003764 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003765 int cpu;
3766
3767 *total = 0;
3768 *entries = 0;
3769
3770 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003771 get_total_entries_cpu(buf, &t, &e, cpu);
3772 *total += t;
3773 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003774 }
3775}
3776
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003777unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3778{
3779 unsigned long total, entries;
3780
3781 if (!tr)
3782 tr = &global_trace;
3783
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003784 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003785
3786 return entries;
3787}
3788
3789unsigned long trace_total_entries(struct trace_array *tr)
3790{
3791 unsigned long total, entries;
3792
3793 if (!tr)
3794 tr = &global_trace;
3795
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003796 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003797
3798 return entries;
3799}
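/*
 * Illustrative sketch (added commentary, not upstream code): a debug
 * hook could report the global buffer's fill level with the helper
 * above. Passing NULL selects the global trace array, as both helpers
 * above handle explicitly.
 */
static void __maybe_unused example_report_entries(void)
{
	unsigned long entries = trace_total_entries(NULL);

	pr_info("ftrace buffers currently hold %lu entries\n", entries);
}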
3800
Ingo Molnare309b412008-05-12 21:20:51 +02003801static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003802{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003803	seq_puts(m, "#                  _------=> CPU#            \n"
3804		    "#                 / _-----=> irqs-off        \n"
3805		    "#                | / _----=> need-resched    \n"
3806		    "#                || / _---=> hardirq/softirq \n"
3807		    "#                ||| / _--=> preempt-depth   \n"
3808		    "#                |||| /     delay            \n"
3809		    "#  cmd     pid   ||||| time  |   caller      \n"
3810		    "#     \\   /      |||||  \\    |   /         \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003811}
3812
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003813static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003814{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003815 unsigned long total;
3816 unsigned long entries;
3817
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003818 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003819 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3820 entries, total, num_online_cpus());
3821 seq_puts(m, "#\n");
3822}
3823
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003824static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003825 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003826{
Joel Fernandes441dae82017-06-25 22:38:43 -07003827 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3828
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003829 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003830
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003831 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3832 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003833}
3834
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003835static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003836 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003837{
Joel Fernandes441dae82017-06-25 22:38:43 -07003838 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003839 const char *space = " ";
3840 int prec = tgid ? 10 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003841
Quentin Perret9e738212019-02-14 15:29:50 +00003842 print_event_info(buf, m);
3843
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003844 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3845 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3846 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3847 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3848 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3849 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3850 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003851}
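/*
 * A note on the "%.*s" lines above (added commentary, not from the
 * original source): prec selects how many characters of the padding
 * string are printed, so the column legend shifts right by 10 columns
 * when TGID recording is on and by 2 when it is off, keeping the
 * arrows lined up with the optional TGID column.
 */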
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003852
Jiri Olsa62b915f2010-04-02 19:01:22 +02003853void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003854print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3855{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003856 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003857 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003858 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003859 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003860 unsigned long entries;
3861 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003862 const char *name = "preemption";
3863
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003864 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003865
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003866 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003867
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003868 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003869 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003870 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003871 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003872 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003873 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003874 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003875 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003876 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003877 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003878#if defined(CONFIG_PREEMPT_NONE)
3879 "server",
3880#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3881 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003882#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003883 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02003884#elif defined(CONFIG_PREEMPT_RT)
3885 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003886#else
3887 "unknown",
3888#endif
3889 /* These are reserved for later use */
3890 0, 0, 0, 0);
3891#ifdef CONFIG_SMP
3892 seq_printf(m, " #P:%d)\n", num_online_cpus());
3893#else
3894 seq_puts(m, ")\n");
3895#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003896 seq_puts(m, "# -----------------\n");
3897 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003898 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003899 data->comm, data->pid,
3900 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003901 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003902 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003903
3904 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003905 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003906 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3907 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003908 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003909 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3910 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003911 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912 }
3913
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003914 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003915}
3916
Steven Rostedta3097202008-11-07 22:36:02 -05003917static void test_cpu_buff_start(struct trace_iterator *iter)
3918{
3919 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003920 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003921
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003922 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003923 return;
3924
3925 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3926 return;
3927
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003928 if (cpumask_available(iter->started) &&
3929 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003930 return;
3931
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003932 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003933 return;
3934
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003935 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003936 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003937
3938 /* Don't print started cpu buffer for the first entry of the trace */
3939 if (iter->idx > 1)
3940 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3941 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003942}
3943
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003944static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003945{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003946 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003947 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003948 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003949 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003950 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003951
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003952 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003953
Steven Rostedta3097202008-11-07 22:36:02 -05003954 test_cpu_buff_start(iter);
3955
Steven Rostedtf633cef2008-12-23 23:24:13 -05003956 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003957
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003958 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003959 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3960 trace_print_lat_context(iter);
3961 else
3962 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003963 }
3964
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003965 if (trace_seq_has_overflowed(s))
3966 return TRACE_TYPE_PARTIAL_LINE;
3967
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003968 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003969 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003970
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003971 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003972
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003973 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003974}
3975
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003976static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003977{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003978 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003979 struct trace_seq *s = &iter->seq;
3980 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003981 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003982
3983 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003984
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003985 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003986 trace_seq_printf(s, "%d %d %llu ",
3987 entry->pid, iter->cpu, iter->ts);
3988
3989 if (trace_seq_has_overflowed(s))
3990 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003991
Steven Rostedtf633cef2008-12-23 23:24:13 -05003992 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003993 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003994 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003995
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003996 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003997
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003998 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003999}
4000
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004001static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004002{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004003 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004004 struct trace_seq *s = &iter->seq;
4005 unsigned char newline = '\n';
4006 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004007 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004008
4009 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004010
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004011 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004012 SEQ_PUT_HEX_FIELD(s, entry->pid);
4013 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4014 SEQ_PUT_HEX_FIELD(s, iter->ts);
4015 if (trace_seq_has_overflowed(s))
4016 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004017 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004018
Steven Rostedtf633cef2008-12-23 23:24:13 -05004019 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004020 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004021 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004022 if (ret != TRACE_TYPE_HANDLED)
4023 return ret;
4024 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004025
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004026 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004027
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004028 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004029}
4030
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004031static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004032{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004033 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004034 struct trace_seq *s = &iter->seq;
4035 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004036 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004037
4038 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004039
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004040 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004041 SEQ_PUT_FIELD(s, entry->pid);
4042 SEQ_PUT_FIELD(s, iter->cpu);
4043 SEQ_PUT_FIELD(s, iter->ts);
4044 if (trace_seq_has_overflowed(s))
4045 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004046 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004047
Steven Rostedtf633cef2008-12-23 23:24:13 -05004048 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004049 return event ? event->funcs->binary(iter, 0, event) :
4050 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004051}
4052
Jiri Olsa62b915f2010-04-02 19:01:22 +02004053int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004054{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004055 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004056 int cpu;
4057
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004058 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004059 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004060 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004061 buf_iter = trace_buffer_iter(iter, cpu);
4062 if (buf_iter) {
4063 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004064 return 0;
4065 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004066 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004067 return 0;
4068 }
4069 return 1;
4070 }
4071
Steven Rostedtab464282008-05-12 21:21:00 +02004072 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004073 buf_iter = trace_buffer_iter(iter, cpu);
4074 if (buf_iter) {
4075 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004076 return 0;
4077 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004078 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004079 return 0;
4080 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004081 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004082
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004083 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004084}
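/*
 * Illustrative sketch: consumers such as the kdb dumper check
 * trace_empty() first so an empty trace prints nothing at all.
 */
static void __maybe_unused example_maybe_dump(struct trace_iterator *iter)
{
	if (trace_empty(iter))
		return;
	/* ... walk the buffers and print each entry ... */
}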
4085
Lai Jiangshan4f535962009-05-18 19:35:34 +08004086/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004087enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004088{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004089 struct trace_array *tr = iter->tr;
4090 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004091 enum print_line_t ret;
4092
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004093 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004094 if (iter->lost_events == (unsigned long)-1)
4095 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4096 iter->cpu);
4097 else
4098 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4099 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004100 if (trace_seq_has_overflowed(&iter->seq))
4101 return TRACE_TYPE_PARTIAL_LINE;
4102 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004103
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004104 if (iter->trace && iter->trace->print_line) {
4105 ret = iter->trace->print_line(iter);
4106 if (ret != TRACE_TYPE_UNHANDLED)
4107 return ret;
4108 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004109
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004110 if (iter->ent->type == TRACE_BPUTS &&
4111 trace_flags & TRACE_ITER_PRINTK &&
4112 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4113 return trace_print_bputs_msg_only(iter);
4114
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004115 if (iter->ent->type == TRACE_BPRINT &&
4116 trace_flags & TRACE_ITER_PRINTK &&
4117 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004118 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004119
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004120 if (iter->ent->type == TRACE_PRINT &&
4121 trace_flags & TRACE_ITER_PRINTK &&
4122 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004123 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004124
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004125 if (trace_flags & TRACE_ITER_BIN)
4126 return print_bin_fmt(iter);
4127
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004128 if (trace_flags & TRACE_ITER_HEX)
4129 return print_hex_fmt(iter);
4130
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004131 if (trace_flags & TRACE_ITER_RAW)
4132 return print_raw_fmt(iter);
4133
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004134 return print_trace_fmt(iter);
4135}
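/*
 * Illustrative sketch of the consumption pattern (s_show() below is
 * the real consumer): render one entry into the iterator's trace_seq,
 * then copy it into the seq_file. Assumes the caller holds
 * trace_event_read_lock() and set the iterator up as __tracing_open()
 * does.
 */
static void __maybe_unused example_render_one(struct trace_iterator *iter,
					      struct seq_file *m)
{
	if (print_trace_line(iter) != TRACE_TYPE_PARTIAL_LINE)
		trace_print_seq(m, &iter->seq);
}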
4136
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004137void trace_latency_header(struct seq_file *m)
4138{
4139 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004140 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004141
4142 /* print nothing if the buffers are empty */
4143 if (trace_empty(iter))
4144 return;
4145
4146 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4147 print_trace_header(m, iter);
4148
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004149 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004150 print_lat_help_header(m);
4151}
4152
Jiri Olsa62b915f2010-04-02 19:01:22 +02004153void trace_default_header(struct seq_file *m)
4154{
4155 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004156 struct trace_array *tr = iter->tr;
4157 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004158
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004159 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4160 return;
4161
Jiri Olsa62b915f2010-04-02 19:01:22 +02004162 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4163 /* print nothing if the buffers are empty */
4164 if (trace_empty(iter))
4165 return;
4166 print_trace_header(m, iter);
4167 if (!(trace_flags & TRACE_ITER_VERBOSE))
4168 print_lat_help_header(m);
4169 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004170 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4171 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004172 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004173 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004174 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004175 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004176 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004177 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004178 }
4179}
4180
Steven Rostedte0a413f2011-09-29 21:26:16 -04004181static void test_ftrace_alive(struct seq_file *m)
4182{
4183 if (!ftrace_is_dead())
4184 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004185 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4186 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004187}
4188
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004189#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004190static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004191{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004192 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4193 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4194 "# Takes a snapshot of the main buffer.\n"
4195 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4196		 "# (Doesn't have to be '2'; works with any number that\n"
4197		 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004198}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004199
4200static void show_snapshot_percpu_help(struct seq_file *m)
4201{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004202 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004203#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004204 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4205 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004206#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004207 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4208 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004209#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004210 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4211 "# (Doesn't have to be '2' works with any number that\n"
4212 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004213}
4214
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004215static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4216{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004217 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004218 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004219 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004220 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004221
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004222 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004223 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4224 show_snapshot_main_help(m);
4225 else
4226 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004227}
4228#else
4229/* Should never be called */
4230static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4231#endif
4232
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004233static int s_show(struct seq_file *m, void *v)
4234{
4235 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004236 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004237
4238 if (iter->ent == NULL) {
4239 if (iter->tr) {
4240 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4241 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004242 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004243 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004244 if (iter->snapshot && trace_empty(iter))
4245 print_snapshot_help(m, iter);
4246 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004247 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004248 else
4249 trace_default_header(m);
4250
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004251 } else if (iter->leftover) {
4252 /*
4253 * If we filled the seq_file buffer earlier, we
4254 * want to just show it now.
4255 */
4256 ret = trace_print_seq(m, &iter->seq);
4257
4258		/* ret should be zero this time, but you never know */
4259 iter->leftover = ret;
4260
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004261 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004262 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004263 ret = trace_print_seq(m, &iter->seq);
4264 /*
4265 * If we overflow the seq_file buffer, then it will
4266 * ask us for this data again at start up.
4267 * Use that instead.
4268 * ret is 0 if seq_file write succeeded.
4269 * -1 otherwise.
4270 */
4271 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004272 }
4273
4274 return 0;
4275}
4276
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004277/*
4278 * Should be used after trace_array_get(); trace_types_lock
4279 * ensures that i_cdev was already initialized.
4280 */
4281static inline int tracing_get_cpu(struct inode *inode)
4282{
4283 if (inode->i_cdev) /* See trace_create_cpu_file() */
4284 return (long)inode->i_cdev - 1;
4285 return RING_BUFFER_ALL_CPUS;
4286}
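/*
 * Illustrative sketch of the matching encode side (the real store is
 * done by trace_create_cpu_file() later in this file): per-cpu files
 * record cpu + 1 in i_cdev, so a zero i_cdev means "no CPU recorded"
 * and tracing_get_cpu() maps it to RING_BUFFER_ALL_CPUS.
 */
static inline void __maybe_unused example_set_cpu(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}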
4287
James Morris88e9d342009-09-22 16:43:43 -07004288static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004289 .start = s_start,
4290 .next = s_next,
4291 .stop = s_stop,
4292 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004293};
4294
Ingo Molnare309b412008-05-12 21:20:51 +02004295static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004296__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004297{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004298 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004299 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004300 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004301
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004302 if (tracing_disabled)
4303 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004304
Jiri Olsa50e18b92012-04-25 10:23:39 +02004305 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004306 if (!iter)
4307 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004308
Gil Fruchter72917232015-06-09 10:32:35 +03004309 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004310 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004311 if (!iter->buffer_iter)
4312 goto release;
4313
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004314 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004315 * trace_find_next_entry() may need to save off iter->ent.
4316 * It will place it into the iter->temp buffer. As most
4317 * events are less than 128, allocate a buffer of that size.
4318 * If one is greater, then trace_find_next_entry() will
4319 * allocate a new buffer to adjust for the bigger iter->ent.
4320 * It's not critical if it fails to get allocated here.
4321 */
4322 iter->temp = kmalloc(128, GFP_KERNEL);
4323 if (iter->temp)
4324 iter->temp_size = 128;
4325
4326 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004327 * We make a copy of the current tracer to avoid concurrent
4328 * changes on it while we are reading.
4329 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004330 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004331 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004332 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004333 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004334
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004335 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004336
Li Zefan79f55992009-06-15 14:58:26 +08004337 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004338 goto fail;
4339
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004340 iter->tr = tr;
4341
4342#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004343 /* Currently only the top directory has a snapshot */
4344 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004345 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004346 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004347#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004348 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004349 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004350 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004351 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004352 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004353
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004354 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004355 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004356 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004357
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004358 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004359 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004360 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4361
David Sharp8be07092012-11-13 12:18:22 -08004362 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004363 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004364 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4365
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004366 /*
4367 * If pause-on-trace is enabled, then stop the trace while
4368 * dumping, unless this is the "snapshot" file
4369 */
4370 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004371 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004372
Steven Rostedtae3b5092013-01-23 15:22:59 -05004373 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004374 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004375 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004376 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004377 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004378 }
4379 ring_buffer_read_prepare_sync();
4380 for_each_tracing_cpu(cpu) {
4381 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004382 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004383 }
4384 } else {
4385 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004386 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004387 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004388 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004389 ring_buffer_read_prepare_sync();
4390 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004391 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004392 }
4393
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004394 mutex_unlock(&trace_types_lock);
4395
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004396 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004397
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004398 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004399 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004400 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004401 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004402 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004403release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004404 seq_release_private(inode, file);
4405 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004406}
4407
4408int tracing_open_generic(struct inode *inode, struct file *filp)
4409{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004410 int ret;
4411
4412 ret = tracing_check_open_get_tr(NULL);
4413 if (ret)
4414 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004415
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004416 filp->private_data = inode->i_private;
4417 return 0;
4418}
4419
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004420bool tracing_is_disabled(void)
4421{
4422	return tracing_disabled;
4423}
4424
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004425/*
4426 * Open and update trace_array ref count.
4427 * Must have the current trace_array passed to it.
4428 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004429int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004430{
4431 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004432 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004433
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004434 ret = tracing_check_open_get_tr(tr);
4435 if (ret)
4436 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004437
4438 filp->private_data = inode->i_private;
4439
4440 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004441}
4442
Hannes Eder4fd27352009-02-10 19:44:12 +01004443static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004444{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004445 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004446 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004447 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004448 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004449
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004450 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004451 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004452 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004453 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004454
Oleg Nesterov6484c712013-07-23 17:26:10 +02004455 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004456 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004457 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004458
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004459 for_each_tracing_cpu(cpu) {
4460 if (iter->buffer_iter[cpu])
4461 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4462 }
4463
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004464 if (iter->trace && iter->trace->close)
4465 iter->trace->close(iter);
4466
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004467 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004468 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004469 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004470
4471 __trace_array_put(tr);
4472
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004473 mutex_unlock(&trace_types_lock);
4474
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004475 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004476 free_cpumask_var(iter->started);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004477 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004478 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004479 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004480 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004481
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004482 return 0;
4483}
4484
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004485static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4486{
4487 struct trace_array *tr = inode->i_private;
4488
4489 trace_array_put(tr);
4490 return 0;
4491}
4492
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004493static int tracing_single_release_tr(struct inode *inode, struct file *file)
4494{
4495 struct trace_array *tr = inode->i_private;
4496
4497 trace_array_put(tr);
4498
4499 return single_release(inode, file);
4500}
4501
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004502static int tracing_open(struct inode *inode, struct file *file)
4503{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004504 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004505 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004506 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004507
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004508 ret = tracing_check_open_get_tr(tr);
4509 if (ret)
4510 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004511
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004512 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004513 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4514 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004515 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004516
4517#ifdef CONFIG_TRACER_MAX_TRACE
4518 if (tr->current_trace->print_max)
4519 trace_buf = &tr->max_buffer;
4520#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004521
4522 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004523 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004524 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004525 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004526 }
4527
4528 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004529 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004530 if (IS_ERR(iter))
4531 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004532 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004533 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4534 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004535
4536 if (ret < 0)
4537 trace_array_put(tr);
4538
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004539 return ret;
4540}
4541
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004542/*
4543 * Some tracers are not suitable for instance buffers.
4544 * A tracer is always available for the global array (toplevel)
4545 * or if it explicitly states that it is.
4546 */
4547static bool
4548trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4549{
4550 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4551}
4552
4553/* Find the next tracer that this trace array may use */
4554static struct tracer *
4555get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4556{
4557 while (t && !trace_ok_for_array(t, tr))
4558 t = t->next;
4559
4560 return t;
4561}
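/*
 * Illustrative sketch: a tracer opts in to instance buffers by setting
 * ->allow_instances, which trace_ok_for_array() checks above. The
 * fields shown are real; the tracer itself is made up.
 */
static struct tracer example_instance_tracer __maybe_unused = {
	.name		 = "example",
	.allow_instances = true,
};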
4562
Ingo Molnare309b412008-05-12 21:20:51 +02004563static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004564t_next(struct seq_file *m, void *v, loff_t *pos)
4565{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004566 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004567 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004568
4569 (*pos)++;
4570
4571 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004572 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004573
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004574 return t;
4575}
4576
4577static void *t_start(struct seq_file *m, loff_t *pos)
4578{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004579 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004580 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004581 loff_t l = 0;
4582
4583 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004584
4585 t = get_tracer_for_array(tr, trace_types);
4586 for (; t && l < *pos; t = t_next(m, t, &l))
4587 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004588
4589 return t;
4590}
4591
4592static void t_stop(struct seq_file *m, void *p)
4593{
4594 mutex_unlock(&trace_types_lock);
4595}
4596
4597static int t_show(struct seq_file *m, void *v)
4598{
4599 struct tracer *t = v;
4600
4601 if (!t)
4602 return 0;
4603
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004604 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004605 if (t->next)
4606 seq_putc(m, ' ');
4607 else
4608 seq_putc(m, '\n');
4609
4610 return 0;
4611}
4612
James Morris88e9d342009-09-22 16:43:43 -07004613static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004614 .start = t_start,
4615 .next = t_next,
4616 .stop = t_stop,
4617 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004618};
4619
4620static int show_traces_open(struct inode *inode, struct file *file)
4621{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004622 struct trace_array *tr = inode->i_private;
4623 struct seq_file *m;
4624 int ret;
4625
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004626 ret = tracing_check_open_get_tr(tr);
4627 if (ret)
4628 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004629
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004630 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004631 if (ret) {
4632 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004633 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004634 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004635
4636 m = file->private_data;
4637 m->private = tr;
4638
4639 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004640}
4641
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004642static int show_traces_release(struct inode *inode, struct file *file)
4643{
4644 struct trace_array *tr = inode->i_private;
4645
4646 trace_array_put(tr);
4647 return seq_release(inode, file);
4648}
4649
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004650static ssize_t
4651tracing_write_stub(struct file *filp, const char __user *ubuf,
4652 size_t count, loff_t *ppos)
4653{
4654 return count;
4655}
4656
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004657loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004658{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004659 int ret;
4660
Slava Pestov364829b2010-11-24 15:13:16 -08004661 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004662 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004663 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004664 file->f_pos = ret = 0;
4665
4666 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004667}
4668
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004669static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004670 .open = tracing_open,
4671 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004672 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004673 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004674 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004675};
4676
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004677static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004678 .open = show_traces_open,
4679 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004680 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004681 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004682};
4683
4684static ssize_t
4685tracing_cpumask_read(struct file *filp, char __user *ubuf,
4686 size_t count, loff_t *ppos)
4687{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004688 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004689 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004690 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004691
Changbin Du90e406f2017-11-30 11:39:43 +08004692 len = snprintf(NULL, 0, "%*pb\n",
4693 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4694 mask_str = kmalloc(len, GFP_KERNEL);
4695 if (!mask_str)
4696 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004697
Changbin Du90e406f2017-11-30 11:39:43 +08004698 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004699 cpumask_pr_args(tr->tracing_cpumask));
4700 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004701 count = -EINVAL;
4702 goto out_err;
4703 }
Changbin Du90e406f2017-11-30 11:39:43 +08004704 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004705
4706out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004707 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004708
4709 return count;
4710}
4711
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004712int tracing_set_cpumask(struct trace_array *tr,
4713 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02004714{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004715 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304716
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004717 if (!tr)
4718 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004719
Steven Rostedta5e25882008-12-02 15:34:05 -05004720 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004721 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004722 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004723 /*
4724 * Increase/decrease the disabled counter if we are
4725 * about to flip a bit in the cpumask:
4726 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004727 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304728 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004729 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4730 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004731 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004732 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304733 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004734 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4735 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004736 }
4737 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004738 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004739 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004740
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004741 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004742
4743 return 0;
4744}
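/*
 * Illustrative sketch: restricting tracing to CPU 0 for a trace array
 * from kernel code. The helper above does the per-cpu enable/disable
 * bookkeeping; the mask built here is made up for the example.
 */
static int __maybe_unused example_trace_cpu0_only(struct trace_array *tr)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	cpumask_set_cpu(0, new_mask);

	err = tracing_set_cpumask(tr, new_mask);

	free_cpumask_var(new_mask);
	return err;
}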
4745
4746static ssize_t
4747tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4748 size_t count, loff_t *ppos)
4749{
4750 struct trace_array *tr = file_inode(filp)->i_private;
4751 cpumask_var_t tracing_cpumask_new;
4752 int err;
4753
4754 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4755 return -ENOMEM;
4756
4757 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4758 if (err)
4759 goto err_free;
4760
4761 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4762 if (err)
4763 goto err_free;
4764
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304765 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004766
Ingo Molnarc7078de2008-05-12 21:20:52 +02004767 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004768
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004769err_free:
Li Zefan215368e2009-06-15 10:56:42 +08004770 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004771
4772 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004773}
4774
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004775static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004776 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004777 .read = tracing_cpumask_read,
4778 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004779 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004780 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004781};
4782
Li Zefanfdb372e2009-12-08 11:15:59 +08004783static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004784{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004785 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004786 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004787 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004788 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004789
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004790 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004791 tracer_flags = tr->current_trace->flags->val;
4792 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004793
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004794 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004795 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004796 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004797 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004798 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004799 }
4800
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004801 for (i = 0; trace_opts[i].name; i++) {
4802 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004803 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004804 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004805 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004806 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004807 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004808
Li Zefanfdb372e2009-12-08 11:15:59 +08004809 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004810}
4811
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004812static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004813 struct tracer_flags *tracer_flags,
4814 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004815{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004816 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004817 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004818
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004819 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004820 if (ret)
4821 return ret;
4822
4823 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004824 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004825 else
Zhaolei77708412009-08-07 18:53:21 +08004826 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004827 return 0;
4828}
4829
Li Zefan8d18eaa2009-12-08 11:17:06 +08004830/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004831static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004832{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004833 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004834 struct tracer_flags *tracer_flags = trace->flags;
4835 struct tracer_opt *opts = NULL;
4836 int i;
4837
4838 for (i = 0; tracer_flags->opts[i].name; i++) {
4839 opts = &tracer_flags->opts[i];
4840
4841 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004842 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004843 }
4844
4845 return -EINVAL;
4846}
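/*
 * Illustrative sketch of the flag tables the two helpers above walk.
 * TRACER_OPT() and the structures are the real ones from trace.h; the
 * option itself is made up. The empty entry terminates the loops,
 * which stop at ->name == NULL.
 */
static struct tracer_opt example_opts[] = {
	{ TRACER_OPT(example-verbose, 0x1) },
	{ } /* terminator */
};

static struct tracer_flags example_flags __maybe_unused = {
	.val  = 0,
	.opts = example_opts,
};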
4847
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004848/* Some tracers require overwrite to stay enabled */
4849int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4850{
4851 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4852 return -1;
4853
4854 return 0;
4855}
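/*
 * Minimal sketch, assuming a hypothetical tracer that wants this
 * veto: its struct tracer would point .flag_changed at a wrapper
 * matching the call made from set_tracer_flag() below.
 *
 *	static int my_flag_changed(struct trace_array *tr, u32 mask, int set)
 *	{
 *		return trace_keep_overwrite(tr->current_trace, mask, set);
 *	}
 */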
4856
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004857int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004858{
Prateek Sood3a53acf2019-12-10 09:15:16 +00004859 if ((mask == TRACE_ITER_RECORD_TGID) ||
4860 (mask == TRACE_ITER_RECORD_CMD))
4861 lockdep_assert_held(&event_mutex);
4862
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004863 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004864 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004865 return 0;
4866
4867 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004868 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004869 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004870 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004871
4872 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004873 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004874 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004875 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004876
4877 if (mask == TRACE_ITER_RECORD_CMD)
4878 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004879
Joel Fernandesd914ba32017-06-26 19:01:55 -07004880 if (mask == TRACE_ITER_RECORD_TGID) {
4881 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004882 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004883 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004884 GFP_KERNEL);
4885 if (!tgid_map) {
4886 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4887 return -ENOMEM;
4888 }
4889
4890 trace_event_enable_tgid_record(enabled);
4891 }
4892
Steven Rostedtc37775d2016-04-13 16:59:18 -04004893 if (mask == TRACE_ITER_EVENT_FORK)
4894 trace_event_follow_fork(tr, enabled);
4895
Namhyung Kim1e104862017-04-17 11:44:28 +09004896 if (mask == TRACE_ITER_FUNC_FORK)
4897 ftrace_pid_follow_fork(tr, enabled);
4898
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004899 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004900 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004901#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004902 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004903#endif
4904 }
Steven Rostedt81698832012-10-11 10:15:05 -04004905
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004906 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004907 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004908 trace_printk_control(enabled);
4909 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004910
4911 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004912}
4913
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09004914int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004915{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004916 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004917 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004918 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004919 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004920 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004921
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004922 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004923
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004924 len = str_has_prefix(cmp, "no");
4925 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004926 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004927
4928 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004929
Prateek Sood3a53acf2019-12-10 09:15:16 +00004930 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004931 mutex_lock(&trace_types_lock);
4932
Yisheng Xie591a0332018-05-17 16:36:03 +08004933 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004934 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004935 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004936 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004937 else
4938 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004939
4940 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00004941 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004942
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004943 /*
4944 * If the first trailing whitespace is replaced with '\0' by strstrip,
4945 * turn it back into a space.
4946 */
4947 if (orig_len > strlen(option))
4948 option[strlen(option)] = ' ';
4949
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004950 return ret;
4951}
4952
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004953static void __init apply_trace_boot_options(void)
4954{
4955 char *buf = trace_boot_options_buf;
4956 char *option;
4957
4958 while (true) {
4959 option = strsep(&buf, ",");
4960
4961 if (!option)
4962 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004963
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004964 if (*option)
4965 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004966
4967 /* Put back the comma to allow this to be called again */
4968 if (buf)
4969 *(buf - 1) = ',';
4970 }
4971}
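/*
 * Example (the option names are real, the combination illustrative):
 * booting with
 *
 *	trace_options=sym-offset,noirq-info
 *
 * leaves that string in trace_boot_options_buf; the loop above hands
 * "sym-offset" and "noirq-info" to trace_set_options() one token at a
 * time, restoring each comma so the buffer can be parsed again.
 */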
4972
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004973static ssize_t
4974tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4975 size_t cnt, loff_t *ppos)
4976{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004977 struct seq_file *m = filp->private_data;
4978 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004979 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004980 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004981
4982 if (cnt >= sizeof(buf))
4983 return -EINVAL;
4984
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004985 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004986 return -EFAULT;
4987
Steven Rostedta8dd2172013-01-09 20:54:17 -05004988 buf[cnt] = 0;
4989
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004990 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004991 if (ret < 0)
4992 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004993
Jiri Olsacf8517c2009-10-23 19:36:16 -04004994 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004995
4996 return cnt;
4997}
4998
Li Zefanfdb372e2009-12-08 11:15:59 +08004999static int tracing_trace_options_open(struct inode *inode, struct file *file)
5000{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005001 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005002 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005003
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005004 ret = tracing_check_open_get_tr(tr);
5005 if (ret)
5006 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005007
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005008 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5009 if (ret < 0)
5010 trace_array_put(tr);
5011
5012 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005013}
5014
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005015static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005016 .open = tracing_trace_options_open,
5017 .read = seq_read,
5018 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005019 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005020 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005021};
5022
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005023static const char readme_msg[] =
5024 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005025 "# echo 0 > tracing_on : quick way to disable tracing\n"
5026 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5027 " Important files:\n"
5028 " trace\t\t\t- The static contents of the buffer\n"
5029 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5030 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5031 " current_tracer\t- function and latency tracers\n"
5032 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005033 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005034 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5035 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5036 " trace_clock\t\t-change the clock used to order events\n"
5037 " local: Per cpu clock but may not be synced across CPUs\n"
5038 " global: Synced across CPUs but slows tracing down.\n"
5039 " counter: Not a clock, but just an increment\n"
5040 " uptime: Jiffy counter from time of boot\n"
5041 " perf: Same clock that perf events use\n"
5042#ifdef CONFIG_X86_64
5043 " x86-tsc: TSC cycle counter\n"
5044#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005045 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5046 " delta: Delta difference against a buffer-wide timestamp\n"
5047 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005048 "\n trace_marker\t\t- Writes to this file are inserted into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005049 "\n trace_marker_raw\t\t- Writes to this file are inserted as binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005050 " tracing_cpumask\t- Limit which CPUs to trace\n"
5051 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5052 "\t\t\t Remove sub-buffer with rmdir\n"
5053 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005054 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005055 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005056 " saved_cmdlines_size\t- echo the number of comm-pid entries to store in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005057#ifdef CONFIG_DYNAMIC_FTRACE
5058 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005059 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5060 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005061 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005062 "\t modules: Can select a group via module\n"
5063 "\t Format: :mod:<module-name>\n"
5064 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5065 "\t triggers: a command to perform when function is hit\n"
5066 "\t Format: <function>:<trigger>[:count]\n"
5067 "\t trigger: traceon, traceoff\n"
5068 "\t\t enable_event:<system>:<event>\n"
5069 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005070#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005071 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005072#endif
5073#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005074 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005075#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005076 "\t\t dump\n"
5077 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005078 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5079 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5080 "\t The first one will disable tracing every time do_fault is hit\n"
5081 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5082 "\t The first time do trap is hit and it disables tracing, the\n"
5083 "\t counter will decrement to 2. If tracing is already disabled,\n"
5084 "\t the counter will not decrement. It only decrements when the\n"
5085 "\t trigger did work\n"
5086 "\t To remove trigger without count:\n"
5087 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5088 "\t To remove trigger with a count:\n"
5089 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005090 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005091 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5092 "\t modules: Can select a group via module command :mod:\n"
5093 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005094#endif /* CONFIG_DYNAMIC_FTRACE */
5095#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005096 " set_ftrace_pid\t- Write pid(s) to limit function tracing to those pids\n"
5097 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005098 " set_ftrace_notrace_pid\t- Write pid(s) to exclude those pids from function tracing\n"
5099 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005100#endif
5101#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5102 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005103 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005104 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5105#endif
5106#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005107 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5108 "\t\t\t snapshot buffer. Read the contents for more\n"
5109 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005110#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005111#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005112 " stack_trace\t\t- Shows the max stack trace when active\n"
5113 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005114 "\t\t\t Write into this file to reset the max size (trigger a\n"
5115 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005116#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005117 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5118 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005119#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005120#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005121#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005122 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005123 "\t\t\t Write into this file to define/undefine new trace events.\n"
5124#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005125#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005126 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005127 "\t\t\t Write into this file to define/undefine new trace events.\n"
5128#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005129#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005130 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005131 "\t\t\t Write into this file to define/undefine new trace events.\n"
5132#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005133#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005134 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005135 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5136 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005137#ifdef CONFIG_HIST_TRIGGERS
5138 "\t s:[synthetic/]<event> <field> [<field>]\n"
5139#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005140 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005141#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005142 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05305143 " place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005144#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005145#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05305146 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005147#endif
5148 "\t args: <name>=fetcharg[:type]\n"
5149 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005150#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005151 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005152#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005153 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005154#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005155 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005156 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005157 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005158 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005159#ifdef CONFIG_HIST_TRIGGERS
5160 "\t field: <stype> <name>;\n"
5161 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5162 "\t [unsigned] char/int/long\n"
5163#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005164#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005165 " events/\t\t- Directory containing all trace event subsystems:\n"
5166 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5167 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005168 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5169 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005170 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005171 " events/<system>/<event>/\t- Directory containing control files for\n"
5172 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005173 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5174 " filter\t\t- If set, only events passing filter are traced\n"
5175 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005176 "\t Format: <trigger>[:count][if <filter>]\n"
5177 "\t trigger: traceon, traceoff\n"
5178 "\t enable_event:<system>:<event>\n"
5179 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005180#ifdef CONFIG_HIST_TRIGGERS
5181 "\t enable_hist:<system>:<event>\n"
5182 "\t disable_hist:<system>:<event>\n"
5183#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005184#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005185 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005186#endif
5187#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005188 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005189#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005190#ifdef CONFIG_HIST_TRIGGERS
5191 "\t\t hist (see below)\n"
5192#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005193 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5194 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5195 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5196 "\t events/block/block_unplug/trigger\n"
5197 "\t The first disables tracing every time block_unplug is hit.\n"
5198 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5199 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5200 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5201 "\t Like function triggers, the counter is only decremented if it\n"
5202 "\t enabled or disabled tracing.\n"
5203 "\t To remove a trigger without a count:\n"
5204 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5205 "\t To remove a trigger with a count:\n"
5206 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5207 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005208#ifdef CONFIG_HIST_TRIGGERS
5209 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005210 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005211 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005212 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005213 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005214 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005215 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005216 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005217 "\t [if <filter>]\n\n"
5218 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005219 "\t table using the key(s) and value(s) named, and the value of a\n"
5220 "\t sum called 'hitcount' is incremented. Keys and values\n"
5221 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005222 "\t can be any field, or the special string 'stacktrace'.\n"
5223 "\t Compound keys consisting of up to two fields can be specified\n"
5224 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5225 "\t fields. Sort keys consisting of up to two fields can be\n"
5226 "\t specified using the 'sort' keyword. The sort direction can\n"
5227 "\t be modified by appending '.descending' or '.ascending' to a\n"
5228 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005229 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5230 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5231 "\t its histogram data will be shared with other triggers of the\n"
5232 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005233 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005234 "\t table in its entirety to stdout. If there are multiple hist\n"
5235 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005236 "\t trigger in the output. The table displayed for a named\n"
5237 "\t trigger will be the same as any other instance having the\n"
5238 "\t same name. The default format used to display a given field\n"
5239 "\t can be modified by appending any of the following modifiers\n"
5240 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005241 "\t .hex display a number as a hex value\n"
5242 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005243 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005244 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005245 "\t .syscall display a syscall id as a syscall name\n"
5246 "\t .log2 display log2 value rather than raw number\n"
5247 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005248 "\t The 'pause' parameter can be used to pause an existing hist\n"
5249 "\t trigger or to start a hist trigger but not log any events\n"
5250 "\t until told to do so. 'continue' can be used to start or\n"
5251 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005252 "\t The 'clear' parameter will clear the contents of a running\n"
5253 "\t hist trigger and leave its current paused/active state\n"
5254 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005255 "\t The enable_hist and disable_hist triggers can be used to\n"
5256 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005257 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005258 "\t the enable_event and disable_event triggers.\n\n"
5259 "\t Hist trigger handlers and actions are executed whenever a\n"
5260 "\t a histogram entry is added or updated. They take the form:\n\n"
5261 "\t <handler>.<action>\n\n"
5262 "\t The available handlers are:\n\n"
5263 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005264 "\t onmax(var) - invoke if var exceeds current max\n"
5265 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005266 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005267 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005268 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005269#ifdef CONFIG_TRACER_SNAPSHOT
5270 "\t snapshot() - snapshot the trace buffer\n"
5271#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005272#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005273;
5274
5275static ssize_t
5276tracing_readme_read(struct file *filp, char __user *ubuf,
5277 size_t cnt, loff_t *ppos)
5278{
5279 return simple_read_from_buffer(ubuf, cnt, ppos,
5280 readme_msg, strlen(readme_msg));
5281}
5282
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005283static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005284 .open = tracing_open_generic,
5285 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005286 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005287};
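/*
 * The readme_msg text above is what user space reads back from the
 * README file, e.g. (the mount point may differ):
 *
 *	# cat /sys/kernel/tracing/README
 */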
5288
Michael Sartain99c621d2017-07-05 22:07:15 -06005289static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5290{
5291 int *ptr = v;
5292
5293 if (*pos || m->count)
5294 ptr++;
5295
5296 (*pos)++;
5297
5298 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5299 if (trace_find_tgid(*ptr))
5300 return ptr;
5301 }
5302
5303 return NULL;
5304}
5305
5306static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5307{
5308 void *v;
5309 loff_t l = 0;
5310
5311 if (!tgid_map)
5312 return NULL;
5313
5314 v = &tgid_map[0];
5315 while (l <= *pos) {
5316 v = saved_tgids_next(m, v, &l);
5317 if (!v)
5318 return NULL;
5319 }
5320
5321 return v;
5322}
5323
5324static void saved_tgids_stop(struct seq_file *m, void *v)
5325{
5326}
5327
5328static int saved_tgids_show(struct seq_file *m, void *v)
5329{
5330 int pid = (int *)v - tgid_map;
5331
5332 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5333 return 0;
5334}
5335
5336static const struct seq_operations tracing_saved_tgids_seq_ops = {
5337 .start = saved_tgids_start,
5338 .stop = saved_tgids_stop,
5339 .next = saved_tgids_next,
5340 .show = saved_tgids_show,
5341};
5342
5343static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5344{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005345 int ret;
5346
5347 ret = tracing_check_open_get_tr(NULL);
5348 if (ret)
5349 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005350
5351 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5352}
5353
5354
5355static const struct file_operations tracing_saved_tgids_fops = {
5356 .open = tracing_saved_tgids_open,
5357 .read = seq_read,
5358 .llseek = seq_lseek,
5359 .release = seq_release,
5360};
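/*
 * Output sketch (the values are examples only): each line pairs a pid
 * with its recorded tgid, as emitted by saved_tgids_show() above.
 *
 *	# cat saved_tgids
 *	1023 1023
 *	1024 1023
 */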
5361
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005362static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005363{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005364 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005365
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005366 if (*pos || m->count)
5367 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005368
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005369 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005370
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005371 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5372 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005373 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005374 continue;
5375
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005376 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005377 }
5378
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005379 return NULL;
5380}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005381
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005382static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5383{
5384 void *v;
5385 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005386
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005387 preempt_disable();
5388 arch_spin_lock(&trace_cmdline_lock);
5389
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005390 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005391 while (l <= *pos) {
5392 v = saved_cmdlines_next(m, v, &l);
5393 if (!v)
5394 return NULL;
5395 }
5396
5397 return v;
5398}
5399
5400static void saved_cmdlines_stop(struct seq_file *m, void *v)
5401{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005402 arch_spin_unlock(&trace_cmdline_lock);
5403 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005404}
5405
5406static int saved_cmdlines_show(struct seq_file *m, void *v)
5407{
5408 char buf[TASK_COMM_LEN];
5409 unsigned int *pid = v;
5410
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005411 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005412 seq_printf(m, "%d %s\n", *pid, buf);
5413 return 0;
5414}
5415
5416static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5417 .start = saved_cmdlines_start,
5418 .next = saved_cmdlines_next,
5419 .stop = saved_cmdlines_stop,
5420 .show = saved_cmdlines_show,
5421};
5422
5423static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5424{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005425 int ret;
5426
5427 ret = tracing_check_open_get_tr(NULL);
5428 if (ret)
5429 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005430
5431 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005432}
5433
5434static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005435 .open = tracing_saved_cmdlines_open,
5436 .read = seq_read,
5437 .llseek = seq_lseek,
5438 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005439};
5440
5441static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005442tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5443 size_t cnt, loff_t *ppos)
5444{
5445 char buf[64];
5446 int r;
5447
5448 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005449 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005450 arch_spin_unlock(&trace_cmdline_lock);
5451
5452 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5453}
5454
5455static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5456{
5457 kfree(s->saved_cmdlines);
5458 kfree(s->map_cmdline_to_pid);
5459 kfree(s);
5460}
5461
5462static int tracing_resize_saved_cmdlines(unsigned int val)
5463{
5464 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5465
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005466 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005467 if (!s)
5468 return -ENOMEM;
5469
5470 if (allocate_cmdlines_buffer(val, s) < 0) {
5471 kfree(s);
5472 return -ENOMEM;
5473 }
5474
5475 arch_spin_lock(&trace_cmdline_lock);
5476 savedcmd_temp = savedcmd;
5477 savedcmd = s;
5478 arch_spin_unlock(&trace_cmdline_lock);
5479 free_saved_cmdlines_buffer(savedcmd_temp);
5480
5481 return 0;
5482}
5483
5484static ssize_t
5485tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5486 size_t cnt, loff_t *ppos)
5487{
5488 unsigned long val;
5489 int ret;
5490
5491 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5492 if (ret)
5493 return ret;
5494
5495 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
5496 if (!val || val > PID_MAX_DEFAULT)
5497 return -EINVAL;
5498
5499 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5500 if (ret < 0)
5501 return ret;
5502
5503 *ppos += cnt;
5504
5505 return cnt;
5506}
5507
5508static const struct file_operations tracing_saved_cmdlines_size_fops = {
5509 .open = tracing_open_generic,
5510 .read = tracing_saved_cmdlines_size_read,
5511 .write = tracing_saved_cmdlines_size_write,
5512};
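/*
 * Usage sketch (the number is an example): resize the comm-pid cache
 * with
 *
 *	# echo 1024 > saved_cmdlines_size
 *
 * which lands in tracing_resize_saved_cmdlines() above; values outside
 * 1..PID_MAX_DEFAULT are rejected with -EINVAL.
 */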
5513
Jeremy Linton681bec02017-05-31 16:56:53 -05005514#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005515static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005516update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005517{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005518 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005519 if (ptr->tail.next) {
5520 ptr = ptr->tail.next;
5521 /* Set ptr to the next real item (skip head) */
5522 ptr++;
5523 } else
5524 return NULL;
5525 }
5526 return ptr;
5527}
5528
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005529static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005530{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005531 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005532
5533 /*
5534 * Paranoid! If ptr points to end, we don't want to increment past it.
5535 * This really should never happen.
5536 */
Vasily Averin039958a2020-01-24 10:03:01 +03005537 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005538 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005539 if (WARN_ON_ONCE(!ptr))
5540 return NULL;
5541
5542 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005543 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005544
5545 return ptr;
5546}
5547
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005548static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005549{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005550 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005551 loff_t l = 0;
5552
Jeremy Linton1793ed92017-05-31 16:56:46 -05005553 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005554
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005555 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005556 if (v)
5557 v++;
5558
5559 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005560 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005561 }
5562
5563 return v;
5564}
5565
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005566static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005567{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005568 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005569}
5570
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005571static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005572{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005573 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005574
5575 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005576 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005577 ptr->map.system);
5578
5579 return 0;
5580}
5581
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005582static const struct seq_operations tracing_eval_map_seq_ops = {
5583 .start = eval_map_start,
5584 .next = eval_map_next,
5585 .stop = eval_map_stop,
5586 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005587};
5588
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005589static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005590{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005591 int ret;
5592
5593 ret = tracing_check_open_get_tr(NULL);
5594 if (ret)
5595 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005596
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005597 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005598}
5599
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005600static const struct file_operations tracing_eval_map_fops = {
5601 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005602 .read = seq_read,
5603 .llseek = seq_lseek,
5604 .release = seq_release,
5605};
5606
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005607static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005608trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005609{
5610 /* Return tail of array given the head */
5611 return ptr + ptr->head.length + 1;
5612}
5613
5614static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005615trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005616 int len)
5617{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005618 struct trace_eval_map **stop;
5619 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005620 union trace_eval_map_item *map_array;
5621 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005622
5623 stop = start + len;
5624
5625 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005626 * The trace_eval_maps contains the maps plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005627 * where the head holds the module and the length of the array, and
5628 * the tail holds a pointer to the next list.
5629 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005630 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005631 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005632 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005633 return;
5634 }
5635
Jeremy Linton1793ed92017-05-31 16:56:46 -05005636 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005637
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005638 if (!trace_eval_maps)
5639 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005640 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005641 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005642 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005643 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005644 if (!ptr->tail.next)
5645 break;
5646 ptr = ptr->tail.next;
5647
5648 }
5649 ptr->tail.next = map_array;
5650 }
5651 map_array->head.mod = mod;
5652 map_array->head.length = len;
5653 map_array++;
5654
5655 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5656 map_array->map = **map;
5657 map_array++;
5658 }
5659 memset(map_array, 0, sizeof(*map_array));
5660
Jeremy Linton1793ed92017-05-31 16:56:46 -05005661 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005662}
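/*
 * Resulting layout for one module with two eval maps (illustrative):
 *
 *	[ head: mod, length=2 ][ map ][ map ][ tail: next=NULL ]
 *
 * trace_eval_jmp_to_tail() steps from the head over "length" map
 * entries to the tail, whose ->next chains in the next module's array.
 */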
5663
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005664static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005665{
Jeremy Linton681bec02017-05-31 16:56:53 -05005666 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005667 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005668}
5669
Jeremy Linton681bec02017-05-31 16:56:53 -05005670#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005671static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5672static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005673 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005674#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005675
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005676static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005677 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005678{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005679 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005680
5681 if (len <= 0)
5682 return;
5683
5684 map = start;
5685
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005686 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005687
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005688 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005689}
5690
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005691static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005692tracing_set_trace_read(struct file *filp, char __user *ubuf,
5693 size_t cnt, loff_t *ppos)
5694{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005695 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005696 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005697 int r;
5698
5699 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005700 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005701 mutex_unlock(&trace_types_lock);
5702
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005703 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005704}
5705
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005706int tracer_init(struct tracer *t, struct trace_array *tr)
5707{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005708 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005709 return t->init(tr);
5710}
5711
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005712static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005713{
5714 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005715
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005716 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005717 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005718}
5719
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005720#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005721/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005722static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5723 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005724{
5725 int cpu, ret = 0;
5726
5727 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5728 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005729 ret = ring_buffer_resize(trace_buf->buffer,
5730 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005731 if (ret < 0)
5732 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005733 per_cpu_ptr(trace_buf->data, cpu)->entries =
5734 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005735 }
5736 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005737 ret = ring_buffer_resize(trace_buf->buffer,
5738 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005739 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005740 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5741 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005742 }
5743
5744 return ret;
5745}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005746#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005747
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005748static int __tracing_resize_ring_buffer(struct trace_array *tr,
5749 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005750{
5751 int ret;
5752
5753 /*
5754 * If the kernel or the user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005755 * we use the size that was given, and we can forget about
5756 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005757 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005758 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005759
Steven Rostedtb382ede62012-10-10 21:44:34 -04005760 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005761 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005762 return 0;
5763
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005764 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005765 if (ret < 0)
5766 return ret;
5767
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005768#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005769 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5770 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005771 goto out;
5772
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005773 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005774 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005775 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5776 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005777 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005778 /*
5779 * AARGH! We are left with different
5780 * size max buffer!!!!
5781 * The max buffer is our "snapshot" buffer.
5782 * When a tracer needs a snapshot (one of the
5783 * latency tracers), it swaps the max buffer
5784 * with the saved snapshot. We succeeded in
5785 * updating the size of the main buffer, but failed to
5786 * update the size of the max buffer. But when we tried
5787 * to reset the main buffer to the original size, we
5788 * failed there too. This is very unlikely to
5789 * happen, but if it does, warn and kill all
5790 * tracing.
5791 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005792 WARN_ON(1);
5793 tracing_disabled = 1;
5794 }
5795 return ret;
5796 }
5797
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005798 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005799 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005800 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005801 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005802
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005803 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005804#endif /* CONFIG_TRACER_MAX_TRACE */
5805
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005806 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005807 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005808 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005809 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005810
5811 return ret;
5812}
5813
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005814ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5815 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005816{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005817 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005818
5819 mutex_lock(&trace_types_lock);
5820
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005821 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5822 /* make sure, this cpu is enabled in the mask */
5823 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5824 ret = -EINVAL;
5825 goto out;
5826 }
5827 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005828
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005829 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005830 if (ret < 0)
5831 ret = -ENOMEM;
5832
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005833out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005834 mutex_unlock(&trace_types_lock);
5835
5836 return ret;
5837}
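/*
 * Reached from user space through the buffer_size_kb file (the value
 * is an example, in KB per CPU):
 *
 *	# echo 4096 > buffer_size_kb
 *
 * the write handler converts KB to bytes before calling in here.
 */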
5838
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005839
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005840/**
5841 * tracing_update_buffers - used by tracing facility to expand ring buffers
5842 *
5843 * To save memory when tracing is never used on a system that has it
5844 * configured in, the ring buffers are set to a minimum size. Once
5845 * a user starts to use the tracing facility, the buffers need to grow
5846 * to their default size.
5847 *
5848 * This function is to be called when a tracer is about to be used.
5849 */
5850int tracing_update_buffers(void)
5851{
5852 int ret = 0;
5853
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005854 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005855 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005856 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005857 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005858 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005859
5860 return ret;
5861}
5862
Steven Rostedt577b7852009-02-26 23:43:05 -05005863struct trace_option_dentry;
5864
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005865static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005866create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005867
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005868/*
5869 * Used to clear out the tracer before deletion of an instance.
5870 * Must have trace_types_lock held.
5871 */
5872static void tracing_set_nop(struct trace_array *tr)
5873{
5874 if (tr->current_trace == &nop_trace)
5875 return;
5876
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005877 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005878
5879 if (tr->current_trace->reset)
5880 tr->current_trace->reset(tr);
5881
5882 tr->current_trace = &nop_trace;
5883}
5884
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005885static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005886{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005887 /* Only enable if the directory has been created already. */
5888 if (!tr->dir)
5889 return;
5890
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005891 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005892}
5893
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005894int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005895{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005896 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005897#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005898 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005899#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005900 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005901
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005902 mutex_lock(&trace_types_lock);
5903
Steven Rostedt73c51622009-03-11 13:42:01 -04005904 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005905 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005906 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005907 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005908 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005909 ret = 0;
5910 }
5911
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005912 for (t = trace_types; t; t = t->next) {
5913 if (strcmp(t->name, buf) == 0)
5914 break;
5915 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005916 if (!t) {
5917 ret = -EINVAL;
5918 goto out;
5919 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005920 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005921 goto out;
5922
Tom Zanussia35873a2019-02-13 17:42:45 -06005923#ifdef CONFIG_TRACER_SNAPSHOT
5924 if (t->use_max_tr) {
5925 arch_spin_lock(&tr->max_lock);
5926 if (tr->cond_snapshot)
5927 ret = -EBUSY;
5928 arch_spin_unlock(&tr->max_lock);
5929 if (ret)
5930 goto out;
5931 }
5932#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005933 /* Some tracers won't work on the kernel command line */
5934 if (system_state < SYSTEM_RUNNING && t->noboot) {
5935 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5936 t->name);
5937 goto out;
5938 }
5939
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005940 /* Some tracers are only allowed for the top level buffer */
5941 if (!trace_ok_for_array(t, tr)) {
5942 ret = -EINVAL;
5943 goto out;
5944 }
5945
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005946 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04005947 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005948 ret = -EBUSY;
5949 goto out;
5950 }
5951
Steven Rostedt9f029e82008-11-12 15:24:24 -05005952 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005953
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005954 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005955
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005956 if (tr->current_trace->reset)
5957 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005958
Paul E. McKenney74401722018-11-06 18:44:52 -08005959 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005960 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005961
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005962#ifdef CONFIG_TRACER_MAX_TRACE
5963 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005964
5965 if (had_max_tr && !t->use_max_tr) {
5966 /*
5967 * We need to make sure that the update_max_tr sees that
5968 * current_trace changed to nop_trace to keep it from
5969 * swapping the buffers after we resize it.
5970 * The update_max_tr is called from interrupts disabled
5971 * so a synchronized_sched() is sufficient.
5972 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005973 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005974 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005975 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005976#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005977
5978#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005979 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005980 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005981 if (ret < 0)
5982 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005983 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005984#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005985
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005986 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005987 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005988 if (ret)
5989 goto out;
5990 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005991
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005992 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005993 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005994 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005995 out:
5996 mutex_unlock(&trace_types_lock);
5997
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005998 return ret;
5999}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

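/*
 * The tracing_nsecs_* helpers expose a nanosecond-resolution variable
 * (e.g. tracing_thresh or the max latency) to user space in
 * microseconds: reads convert with nsecs_to_usecs(), writes multiply
 * by 1000. For example, writing "100" stores 100000 ns.
 */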
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

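/*
 * Open a trace_pipe file: a blocking, consuming reader of the ring
 * buffer. The tr->trace_ref count taken here is what makes
 * tracing_set_tracer() return -EBUSY while a pipe is open.
 */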
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->array_buffer = &tr->array_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->trace_ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->trace_ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

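/*
 * Poll support shared by the pipe and per-cpu buffer files: an iterator
 * with static per-cpu buffers (trace_buffer_iter()) is always readable,
 * blocking mode always reports readable, and otherwise we defer to the
 * ring buffer's own poll wait.
 */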
static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * Stop blocking once tracing has been disabled and we
		 * have already read something. We still block if tracing
		 * is disabled but we have never read anything: this
		 * allows a user to cat this file, and then enable
		 * tracing. After we have read something, we give an EOF
		 * when tracing is disabled again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by the partial output condition
		 * above. If it is set, one of the trace_seq_* functions is
		 * not being used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of consuming
	 * trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

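/*
 * Consume entries into iter->seq until either @rem bytes have been
 * produced or one page worth of output is ready; returns how many of
 * the requested bytes remain. The seq buffer is page-sized, so output
 * that would overflow it is rolled back rather than split.
 */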
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &default_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

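/*
 * Resize the ring buffer from user space. The written value is in
 * kilobytes (val <<= 10 below), matching what tracing_entries_read()
 * reports; this pair is typically exposed as the "buffer_size_kb"
 * tracefs file, e.g. "echo 1408 > buffer_size_kb".
 */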
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function only exists so that writing to the file (e.g. with
	 * "echo") does not return an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

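/*
 * Write handler for the "trace_marker" tracefs file: injects a
 * user-supplied string into the ring buffer as a TRACE_PRINT event,
 * e.g.:
 *
 *	echo "hello world" > /sys/kernel/tracing/trace_marker
 *
 * A copy that faults is recorded as "<faulted>" rather than dropped.
 */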
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If the input is shorter than "<faulted>", make sure we can still store that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	if (static_branch_unlikely(&trace_marker_exports_enabled))
		ftrace_exports(event, TRACE_EXPORT_MARKER);
	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

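/*
 * Binary counterpart of tracing_mark_write() for the "trace_marker_raw"
 * file: the payload must start with a 4-byte tag id (entry->id), with
 * the raw data following; total size is capped at RAW_DATA_MAX_SIZE.
 */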
static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct raw_data_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

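/*
 * Select the trace clock named @clockstr (see trace_clocks[]), e.g.
 * "echo global > trace_clock" from user space. Both the main and the
 * snapshot buffer are reset afterwards, since timestamps taken with
 * different clocks are not comparable.
 */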
int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	mutex_lock(&trace_types_lock);

	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
		seq_puts(m, "delta [absolute]\n");
	else
		seq_puts(m, "[delta] absolute\n");

	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

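/*
 * Switch @tr between delta and absolute timestamps. The request is
 * refcounted (time_stamp_abs_ref), so absolute mode stays on until
 * every user that enabled it has called this again with abs == false.
 */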
int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (abs && tr->time_stamp_abs_ref++)
		goto out;

	if (!abs) {
		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
			ret = -EINVAL;
			goto out;
		}

		if (--tr->time_stamp_abs_ref)
			goto out;
	}

	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
#endif
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->array_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

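/*
 * Write handler for the "snapshot" file. The written value selects the
 * action: 0 frees the snapshot buffer, 1 allocates it (if needed) and
 * swaps it with the live buffer, and any other value clears the
 * snapshot's contents, e.g. "echo 1 > snapshot" takes a snapshot.
 */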
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	if (ret)
		goto out;

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->array_buffer, iter->cpu_file);
		else
			ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			break;
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id(), NULL);
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	/* The following checks for tracefs lockdown */
	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.array_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007112static const struct file_operations tracing_thresh_fops = {
7113 .open = tracing_open_generic,
7114 .read = tracing_thresh_read,
7115 .write = tracing_thresh_write,
7116 .llseek = generic_file_llseek,
7117};
7118
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007119#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007120static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007121 .open = tracing_open_generic,
7122 .read = tracing_max_lat_read,
7123 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007124 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007125};
Chen Gange428abb2015-11-10 05:15:15 +08007126#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007127
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007128static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007129 .open = tracing_open_generic,
7130 .read = tracing_set_trace_read,
7131 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007132 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007133};
7134
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007135static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007136 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007137 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007138 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007139 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007140 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007141 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007142};
7143
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007144static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007145 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007146 .read = tracing_entries_read,
7147 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007148 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007149 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007150};
7151
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007152static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007153 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007154 .read = tracing_total_entries_read,
7155 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007156 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007157};
7158
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007159static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007160 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007161 .write = tracing_free_buffer_write,
7162 .release = tracing_free_buffer_release,
7163};
7164
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007165static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007166 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007167 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007168 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007169 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007170};
7171
Steven Rostedtfa32e852016-07-06 15:25:08 -04007172static const struct file_operations tracing_mark_raw_fops = {
7173 .open = tracing_open_generic_tr,
7174 .write = tracing_mark_raw_write,
7175 .llseek = generic_file_llseek,
7176 .release = tracing_release_generic_tr,
7177};
7178
Zhaolei5079f322009-08-25 16:12:56 +08007179static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007180 .open = tracing_clock_open,
7181 .read = seq_read,
7182 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007183 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007184 .write = tracing_clock_write,
7185};
7186
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007187static const struct file_operations trace_time_stamp_mode_fops = {
7188 .open = tracing_time_stamp_mode_open,
7189 .read = seq_read,
7190 .llseek = seq_lseek,
7191 .release = tracing_single_release_tr,
7192};
7193
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007194#ifdef CONFIG_TRACER_SNAPSHOT
7195static const struct file_operations snapshot_fops = {
7196 .open = tracing_snapshot_open,
7197 .read = seq_read,
7198 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007199 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007200 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007201};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007202
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007203static const struct file_operations snapshot_raw_fops = {
7204 .open = snapshot_raw_open,
7205 .read = tracing_buffers_read,
7206 .release = tracing_buffers_release,
7207 .splice_read = tracing_buffers_splice_read,
7208 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007209};
7210
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007211#endif /* CONFIG_TRACER_SNAPSHOT */
7212
Tom Zanussi8a062902019-03-31 18:48:15 -05007213#define TRACING_LOG_ERRS_MAX 8
7214#define TRACING_LOG_LOC_MAX 128
7215
7216#define CMD_PREFIX " Command: "
7217
7218struct err_info {
7219 const char **errs; /* ptr to loc-specific array of err strings */
7220 u8 type; /* index into errs -> specific err string */
7221 u8 pos; /* caret position in cmd; fits since cmd len <= MAX_FILTER_STR_VAL (256) */
7222 u64 ts;
7223};
7224
7225struct tracing_log_err {
7226 struct list_head list;
7227 struct err_info info;
7228 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7229 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7230};
7231
Tom Zanussi8a062902019-03-31 18:48:15 -05007232static DEFINE_MUTEX(tracing_err_log_lock);
7233
YueHaibingff585c52019-06-14 23:32:10 +08007234static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007235{
7236 struct tracing_log_err *err;
7237
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007238 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007239 err = kzalloc(sizeof(*err), GFP_KERNEL);
7240 if (!err)
7241 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007242 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007243
7244 return err;
7245 }
7246
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007247 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007248 list_del(&err->list);
7249
7250 return err;
7251}
7252
7253/**
7254 * err_pos - find the position of a string within a command for caret placement
7255 * @cmd: The tracing command that caused the error
7256 * @str: The string to position the caret at within @cmd
7257 *
7258 * Finds the position of the first occurrence of @str within @cmd. The
7259 * return value can be passed to tracing_log_err() for caret placement
7260 * within @cmd.
7261 *
7262 * Returns the index within @cmd of the first occurrence of @str or 0
7263 * if @str was not found.
7264 */
7265unsigned int err_pos(char *cmd, const char *str)
7266{
7267 char *found;
7268
7269 if (WARN_ON(!strlen(cmd)))
7270 return 0;
7271
7272 found = strstr(cmd, str);
7273 if (found)
7274 return found - cmd;
7275
7276 return 0;
7277}
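/*
 * Illustrative sketch (not part of the original file): err_pos() supplies
 * the caret offset for tracing_log_err().  The command string, token and
 * wrapper function below are invented for the example.
 */
#if 0
static unsigned int sample_err_pos(void)
{
	char cmd[] = "hist:keys=bad_field";

	/* "hist:keys=" is 10 characters, so this returns 10 */
	return err_pos(cmd, "bad_field");
}
#endif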
7278
7279/**
7280 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007281 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007282 * @loc: A string describing where the error occurred
7283 * @cmd: The tracing command that caused the error
7284 * @errs: The array of loc-specific static error strings
7285 * @type: The index into errs[], which produces the specific static err string
7286 * @pos: The position the caret should be placed in the cmd
7287 *
7288 * Writes an error into tracing/error_log of the form:
7289 *
7290 * <loc>: error: <text>
7291 * Command: <cmd>
7292 * ^
7293 *
7294 * tracing/error_log is a small log file containing the last
7295 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7296 * unless there has been a tracing error, and the error log can be
7297 * cleared and have its memory freed by writing the empty string in
7298 * truncation mode to it i.e. echo > tracing/error_log.
7299 *
7300 * NOTE: the @errs array along with the @type param is used to
7301 * produce a static error string - this string is not copied and saved
7302 * when the error is logged - only a pointer to it is saved. See
7303 * existing callers for examples of how static strings are typically
7304 * defined for use with tracing_log_err().
7305 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007306void tracing_log_err(struct trace_array *tr,
7307 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007308 const char **errs, u8 type, u8 pos)
7309{
7310 struct tracing_log_err *err;
7311
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007312 if (!tr)
7313 tr = &global_trace;
7314
Tom Zanussi8a062902019-03-31 18:48:15 -05007315 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007316 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007317 if (PTR_ERR(err) == -ENOMEM) {
7318 mutex_unlock(&tracing_err_log_lock);
7319 return;
7320 }
7321
7322 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7323 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7324
7325 err->info.errs = errs;
7326 err->info.type = type;
7327 err->info.pos = pos;
7328 err->info.ts = local_clock();
7329
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007330 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007331 mutex_unlock(&tracing_err_log_lock);
7332}
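/*
 * Illustrative sketch (not part of the original file): how a caller might
 * report a parse error through the error log.  The error table, location
 * string and helper below are invented; tracing_log_err() and err_pos()
 * are the real entry points defined above.
 */
#if 0
static const char *sample_errs[] = { "Unknown field", "Bad operator" };

static void sample_report(struct trace_array *tr, char *cmd, char *tok)
{
	/* The caret will be drawn under @tok inside the echoed command */
	tracing_log_err(tr, "sample: parse error", cmd, sample_errs,
			0, err_pos(cmd, tok));
}
#endif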
7333
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007334static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007335{
7336 struct tracing_log_err *err, *next;
7337
7338 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007339 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007340 list_del(&err->list);
7341 kfree(err);
7342 }
7343
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007344 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007345 mutex_unlock(&tracing_err_log_lock);
7346}
7347
7348static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7349{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007350 struct trace_array *tr = m->private;
7351
Tom Zanussi8a062902019-03-31 18:48:15 -05007352 mutex_lock(&tracing_err_log_lock);
7353
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007354 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007355}
7356
7357static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7358{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007359 struct trace_array *tr = m->private;
7360
7361 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007362}
7363
7364static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7365{
7366 mutex_unlock(&tracing_err_log_lock);
7367}
7368
7369static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7370{
7371 u8 i;
7372
7373 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7374 seq_putc(m, ' ');
7375 for (i = 0; i < pos; i++)
7376 seq_putc(m, ' ');
7377 seq_puts(m, "^\n");
7378}
7379
7380static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7381{
7382 struct tracing_log_err *err = v;
7383
7384 if (err) {
7385 const char *err_text = err->info.errs[err->info.type];
7386 u64 sec = err->info.ts;
7387 u32 nsec;
7388
7389 nsec = do_div(sec, NSEC_PER_SEC);
7390 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7391 err->loc, err_text);
7392 seq_printf(m, "%s", err->cmd);
7393 tracing_err_log_show_pos(m, err->info.pos);
7394 }
7395
7396 return 0;
7397}
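/*
 * Rendered example (illustrative values): with err->info.ts equal to
 * 5000123456ns, the do_div() above yields sec == 5 and nsec == 123456,
 * so the entry prints as:
 *
 *	[    5.000123] hist:sched:sched_switch: error: Unknown field
 *	 Command: keys=bogus
 *	               ^
 */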
7398
7399static const struct seq_operations tracing_err_log_seq_ops = {
7400 .start = tracing_err_log_seq_start,
7401 .next = tracing_err_log_seq_next,
7402 .stop = tracing_err_log_seq_stop,
7403 .show = tracing_err_log_seq_show
7404};
7405
7406static int tracing_err_log_open(struct inode *inode, struct file *file)
7407{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007408 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007409 int ret = 0;
7410
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007411 ret = tracing_check_open_get_tr(tr);
7412 if (ret)
7413 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007414
Tom Zanussi8a062902019-03-31 18:48:15 -05007415 /* If this file was opened for write, then erase contents */
7416 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007417 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007418
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007419 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007420 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007421 if (!ret) {
7422 struct seq_file *m = file->private_data;
7423 m->private = tr;
7424 } else {
7425 trace_array_put(tr);
7426 }
7427 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007428 return ret;
7429}
7430
7431static ssize_t tracing_err_log_write(struct file *file,
7432 const char __user *buffer,
7433 size_t count, loff_t *ppos)
7434{
7435 return count;
7436}
7437
Takeshi Misawad122ed62019-06-28 19:56:40 +09007438static int tracing_err_log_release(struct inode *inode, struct file *file)
7439{
7440 struct trace_array *tr = inode->i_private;
7441
7442 trace_array_put(tr);
7443
7444 if (file->f_mode & FMODE_READ)
7445 seq_release(inode, file);
7446
7447 return 0;
7448}
7449
Tom Zanussi8a062902019-03-31 18:48:15 -05007450static const struct file_operations tracing_err_log_fops = {
7451 .open = tracing_err_log_open,
7452 .write = tracing_err_log_write,
7453 .read = seq_read,
7454 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007455 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007456};
7457
Steven Rostedt2cadf912008-12-01 22:20:19 -05007458static int tracing_buffers_open(struct inode *inode, struct file *filp)
7459{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007460 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007461 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007462 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007463
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007464 ret = tracing_check_open_get_tr(tr);
7465 if (ret)
7466 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007467
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007468 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007469 if (!info) {
7470 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007471 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007472 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007473
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007474 mutex_lock(&trace_types_lock);
7475
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007476 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007477 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007478 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007479 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007480 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007481 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007482 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007483
7484 filp->private_data = info;
7485
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007486 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007487
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007488 mutex_unlock(&trace_types_lock);
7489
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007490 ret = nonseekable_open(inode, filp);
7491 if (ret < 0)
7492 trace_array_put(tr);
7493
7494 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007495}
7496
Al Viro9dd95742017-07-03 00:42:43 -04007497static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007498tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7499{
7500 struct ftrace_buffer_info *info = filp->private_data;
7501 struct trace_iterator *iter = &info->iter;
7502
7503 return trace_poll(iter, filp, poll_table);
7504}
7505
Steven Rostedt2cadf912008-12-01 22:20:19 -05007506static ssize_t
7507tracing_buffers_read(struct file *filp, char __user *ubuf,
7508 size_t count, loff_t *ppos)
7509{
7510 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007511 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007512 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007513 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007514
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007515 if (!count)
7516 return 0;
7517
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007518#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007519 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7520 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007521#endif
7522
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007523 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007524 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007525 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007526 if (IS_ERR(info->spare)) {
7527 ret = PTR_ERR(info->spare);
7528 info->spare = NULL;
7529 } else {
7530 info->spare_cpu = iter->cpu_file;
7531 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007532 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007533 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007534 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007535
Steven Rostedt2cadf912008-12-01 22:20:19 -05007536 /* Do we have previous read data to read? */
7537 if (info->read < PAGE_SIZE)
7538 goto read;
7539
Steven Rostedtb6273442013-02-28 13:44:11 -05007540 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007541 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007542 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007543 &info->spare,
7544 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007545 iter->cpu_file, 0);
7546 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007547
7548 if (ret < 0) {
7549 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007550 if ((filp->f_flags & O_NONBLOCK))
7551 return -EAGAIN;
7552
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007553 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007554 if (ret)
7555 return ret;
7556
Steven Rostedtb6273442013-02-28 13:44:11 -05007557 goto again;
7558 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007559 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007560 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007561
Steven Rostedt436fc282011-10-14 10:44:25 -04007562 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007563 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007564 size = PAGE_SIZE - info->read;
7565 if (size > count)
7566 size = count;
7567
7568 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007569 if (ret == size)
7570 return -EFAULT;
7571
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007572 size -= ret;
7573
Steven Rostedt2cadf912008-12-01 22:20:19 -05007574 *ppos += size;
7575 info->read += size;
7576
7577 return size;
7578}
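/*
 * Illustrative userspace sketch (path and buffer size assumed, not
 * kernel code): trace_pipe_raw serves data from a page-sized spare
 * buffer, so consumers typically read page-sized chunks and parse the
 * raw ring-buffer page format themselves.
 */
#if 0
static ssize_t sample_read_raw(void)
{
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);
	char page[4096];

	/* one buffer page per successful read */
	return read(fd, page, sizeof(page));
}
#endif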
7579
7580static int tracing_buffers_release(struct inode *inode, struct file *file)
7581{
7582 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007583 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007584
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007585 mutex_lock(&trace_types_lock);
7586
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007587 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007588
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007589 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007590
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007591 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007592 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007593 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007594 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007595
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007596 mutex_unlock(&trace_types_lock);
7597
Steven Rostedt2cadf912008-12-01 22:20:19 -05007598 return 0;
7599}
7600
7601struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007602 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007603 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007604 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007605 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007606};
7607
Jann Hornb9872222019-04-04 23:59:25 +02007608static void buffer_ref_release(struct buffer_ref *ref)
7609{
7610 if (!refcount_dec_and_test(&ref->refcount))
7611 return;
7612 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7613 kfree(ref);
7614}
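/*
 * Reference flow (summary): each pipe buffer produced by
 * tracing_buffers_splice_read() starts with refcount == 1;
 * buffer_pipe_buf_get() takes an extra reference when a pipe slot is
 * duplicated (e.g. by tee(2)), and buffer_pipe_buf_release() /
 * buffer_spd_release() each drop one.  Only the final drop, above,
 * hands the page back to the ring buffer.
 */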
7615
Steven Rostedt2cadf912008-12-01 22:20:19 -05007616static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7617 struct pipe_buffer *buf)
7618{
7619 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7620
Jann Hornb9872222019-04-04 23:59:25 +02007621 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007622 buf->private = 0;
7623}
7624
Matthew Wilcox15fab632019-04-05 14:02:10 -07007625static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007626 struct pipe_buffer *buf)
7627{
7628 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7629
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007630 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007631 return false;
7632
Jann Hornb9872222019-04-04 23:59:25 +02007633 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007634 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007635}
7636
7637/* Pipe buffer operations for ring buffer pages spliced into a pipe. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007638static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007639 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007640 .get = buffer_pipe_buf_get,
7641};
7642
7643/*
7644 * Callback from splice_to_pipe(), if we need to release some pages
7645 * at the end of the spd in case we errored out while filling the pipe.
7646 */
7647static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7648{
7649 struct buffer_ref *ref =
7650 (struct buffer_ref *)spd->partial[i].private;
7651
Jann Hornb9872222019-04-04 23:59:25 +02007652 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007653 spd->partial[i].private = 0;
7654}
7655
7656static ssize_t
7657tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7658 struct pipe_inode_info *pipe, size_t len,
7659 unsigned int flags)
7660{
7661 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007662 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007663 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7664 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007665 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007666 .pages = pages_def,
7667 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007668 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007669 .ops = &buffer_pipe_buf_ops,
7670 .spd_release = buffer_spd_release,
7671 };
7672 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007673 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007674 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007675
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007676#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007677 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7678 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007679#endif
7680
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007681 if (*ppos & (PAGE_SIZE - 1))
7682 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007683
7684 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007685 if (len < PAGE_SIZE)
7686 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007687 len &= PAGE_MASK;
7688 }
7689
Al Viro1ae22932016-09-17 18:31:46 -04007690 if (splice_grow_spd(pipe, &spd))
7691 return -ENOMEM;
7692
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007693 again:
7694 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007695 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007696
Al Viroa786c062014-04-11 12:01:03 -04007697 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007698 struct page *page;
7699 int r;
7700
7701 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007702 if (!ref) {
7703 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007704 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007705 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007706
Jann Hornb9872222019-04-04 23:59:25 +02007707 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007708 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007709 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007710 if (IS_ERR(ref->page)) {
7711 ret = PTR_ERR(ref->page);
7712 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007713 kfree(ref);
7714 break;
7715 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007716 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007717
7718 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007719 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007720 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007721 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7722 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007723 kfree(ref);
7724 break;
7725 }
7726
Steven Rostedt2cadf912008-12-01 22:20:19 -05007727 page = virt_to_page(ref->page);
7728
7729 spd.pages[i] = page;
7730 spd.partial[i].len = PAGE_SIZE;
7731 spd.partial[i].offset = 0;
7732 spd.partial[i].private = (unsigned long)ref;
7733 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007734 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007735
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007736 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007737 }
7738
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007739 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007740 spd.nr_pages = i;
7741
7742 /* did we read anything? */
7743 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007744 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007745 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007746
Al Viro1ae22932016-09-17 18:31:46 -04007747 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007748 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007749 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007750
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007751 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007752 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007753 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007754
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007755 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007756 }
7757
7758 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007759out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007760 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007761
Steven Rostedt2cadf912008-12-01 22:20:19 -05007762 return ret;
7763}
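/*
 * Illustrative userspace sketch (descriptors and sizes assumed, not
 * kernel code): moving ring-buffer pages from trace_pipe_raw to a file
 * through a pipe, without copying them through userspace.
 */
#if 0
static void sample_splice(int raw_fd, int out_fd)
{
	int pfd[2];

	pipe(pfd);
	splice(raw_fd, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
	splice(pfd[0], NULL, out_fd, NULL, 4096, SPLICE_F_MOVE);
}
#endif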
7764
7765static const struct file_operations tracing_buffers_fops = {
7766 .open = tracing_buffers_open,
7767 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007768 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007769 .release = tracing_buffers_release,
7770 .splice_read = tracing_buffers_splice_read,
7771 .llseek = no_llseek,
7772};
7773
Steven Rostedtc8d77182009-04-29 18:03:45 -04007774static ssize_t
7775tracing_stats_read(struct file *filp, char __user *ubuf,
7776 size_t count, loff_t *ppos)
7777{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007778 struct inode *inode = file_inode(filp);
7779 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007780 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007781 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007782 struct trace_seq *s;
7783 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007784 unsigned long long t;
7785 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007786
Li Zefane4f2d102009-06-15 10:57:28 +08007787 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007788 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007789 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007790
7791 trace_seq_init(s);
7792
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007793 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007794 trace_seq_printf(s, "entries: %ld\n", cnt);
7795
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007796 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007797 trace_seq_printf(s, "overrun: %ld\n", cnt);
7798
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007799 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007800 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7801
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007802 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007803 trace_seq_printf(s, "bytes: %ld\n", cnt);
7804
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007805 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007806 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007807 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007808 usec_rem = do_div(t, USEC_PER_SEC);
7809 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7810 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007811
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007812 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007813 usec_rem = do_div(t, USEC_PER_SEC);
7814 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7815 } else {
7816 /* counter or tsc mode for trace_clock */
7817 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007818 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007819
7820 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007821 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007822 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007823
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007824 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007825 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7826
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007827 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007828 trace_seq_printf(s, "read events: %ld\n", cnt);
7829
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007830 count = simple_read_from_buffer(ubuf, count, ppos,
7831 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007832
7833 kfree(s);
7834
7835 return count;
7836}
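/*
 * Example of the rendered per-cpu stats (values illustrative, ns
 * timestamp mode):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 66432
 *	oldest event ts:  5401.274794
 *	now ts:  5402.001047
 *	dropped events: 0
 *	read events: 112
 */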
7837
7838static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007839 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007840 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007841 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007842 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007843};
7844
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007845#ifdef CONFIG_DYNAMIC_FTRACE
7846
7847static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007848tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007849 size_t cnt, loff_t *ppos)
7850{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007851 ssize_t ret;
7852 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007853 int r;
7854
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007855 /* 256 bytes is plenty for the three counters printed below */
7856 buf = kmalloc(256, GFP_KERNEL);
7857 if (!buf)
7858 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007859
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007860 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7861 ftrace_update_tot_cnt,
7862 ftrace_number_of_pages,
7863 ftrace_number_of_groups);
7864
7865 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7866 kfree(buf);
7867 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007868}
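/*
 * Example output (values illustrative): the three counters above render
 * as a single line, e.g.:
 *
 *	45673 pages:269 groups: 38
 */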
7869
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007870static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007871 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007872 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007873 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007874};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007875#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007876
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007877#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7878static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007879ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007880 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007881 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007882{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007883 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007884}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007885
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007886static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007887ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007888 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007889 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007890{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007891 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007892 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007893
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007894 if (mapper)
7895 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007896
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007897 if (count) {
7898
7899 if (*count <= 0)
7900 return;
7901
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007902 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007903 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007904
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007905 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007906}
7907
7908static int
7909ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7910 struct ftrace_probe_ops *ops, void *data)
7911{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007912 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007913 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007914
7915 seq_printf(m, "%ps:", (void *)ip);
7916
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007917 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007918
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007919 if (mapper)
7920 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7921
7922 if (count)
7923 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007924 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007925 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007926
7927 return 0;
7928}
7929
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007930static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007931ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007932 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007933{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007934 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007935
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007936 if (!mapper) {
7937 mapper = allocate_ftrace_func_mapper();
7938 if (!mapper)
7939 return -ENOMEM;
7940 *data = mapper;
7941 }
7942
7943 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007944}
7945
7946static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007947ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007948 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007949{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007950 struct ftrace_func_mapper *mapper = data;
7951
7952 if (!ip) {
7953 if (!mapper)
7954 return;
7955 free_ftrace_func_mapper(mapper, NULL);
7956 return;
7957 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007958
7959 ftrace_func_mapper_remove_ip(mapper, ip);
7960}
7961
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007962static struct ftrace_probe_ops snapshot_probe_ops = {
7963 .func = ftrace_snapshot,
7964 .print = ftrace_snapshot_print,
7965};
7966
7967static struct ftrace_probe_ops snapshot_count_probe_ops = {
7968 .func = ftrace_count_snapshot,
7969 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007970 .init = ftrace_snapshot_init,
7971 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007972};
7973
7974static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007975ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007976 char *glob, char *cmd, char *param, int enable)
7977{
7978 struct ftrace_probe_ops *ops;
7979 void *count = (void *)-1;
7980 char *number;
7981 int ret;
7982
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007983 if (!tr)
7984 return -ENODEV;
7985
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007986 /* hash funcs only work with set_ftrace_filter */
7987 if (!enable)
7988 return -EINVAL;
7989
7990 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7991
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007992 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007993 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007994
7995 if (!param)
7996 goto out_reg;
7997
7998 number = strsep(&param, ":");
7999
8000 if (!strlen(number))
8001 goto out_reg;
8002
8003 /*
8004 * We use the callback data field (which is a pointer)
8005 * as our counter.
8006 */
8007 ret = kstrtoul(number, 0, (unsigned long *)&count);
8008 if (ret)
8009 return ret;
8010
8011 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008012 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008013 if (ret < 0)
8014 goto out;
8015
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008016 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008017
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008018 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008019 return ret < 0 ? ret : 0;
8020}
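/*
 * Usage sketch (tracefs paths assumed): the "snapshot" command
 * registered below is driven through set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	(snapshot at most 5 times)
 *	echo '!schedule:snapshot' > set_ftrace_filter	(remove the probe)
 */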
8021
8022static struct ftrace_func_command ftrace_snapshot_cmd = {
8023 .name = "snapshot",
8024 .func = ftrace_trace_snapshot_callback,
8025};
8026
Tom Zanussi38de93a2013-10-24 08:34:18 -05008027static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008028{
8029 return register_ftrace_command(&ftrace_snapshot_cmd);
8030}
8031#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008032static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008033#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008034
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008035static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008036{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008037 if (WARN_ON(!tr->dir))
8038 return ERR_PTR(-ENODEV);
8039
8040 /* Top directory uses NULL as the parent */
8041 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8042 return NULL;
8043
8044 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008045 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008046}
8047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008048static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8049{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008050 struct dentry *d_tracer;
8051
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008052 if (tr->percpu_dir)
8053 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008054
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008055 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008056 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008057 return NULL;
8058
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008059 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008060
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008061 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008062 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008063
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008064 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008065}
8066
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008067static struct dentry *
8068trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8069 void *data, long cpu, const struct file_operations *fops)
8070{
8071 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8072
8073 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008074 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008075 return ret;
8076}
8077
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008078static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008079tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008080{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008081 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008082 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008083 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008084
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008085 if (!d_percpu)
8086 return;
8087
Steven Rostedtdd49a382010-10-20 21:51:26 -04008088 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008089 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008090 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008091 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008092 return;
8093 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008094
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008095 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008096 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008097 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008098
8099 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008100 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008101 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008102
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008103 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008104 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008105
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008106 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008107 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008108
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008109 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008110 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008111
8112#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008113 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008114 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008115
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008116 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008117 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008118#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008119}
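/*
 * Resulting per-CPU layout (illustrative, cpu 0 of the top-level
 * instance):
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot		(CONFIG_TRACER_SNAPSHOT)
 *	per_cpu/cpu0/snapshot_raw	(CONFIG_TRACER_SNAPSHOT)
 */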
8120
Steven Rostedt60a11772008-05-12 21:20:44 +02008121#ifdef CONFIG_FTRACE_SELFTEST
8122/* Let selftest have access to static functions in this file */
8123#include "trace_selftest.c"
8124#endif
8125
Steven Rostedt577b7852009-02-26 23:43:05 -05008126static ssize_t
8127trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8128 loff_t *ppos)
8129{
8130 struct trace_option_dentry *topt = filp->private_data;
8131 char *buf;
8132
8133 if (topt->flags->val & topt->opt->bit)
8134 buf = "1\n";
8135 else
8136 buf = "0\n";
8137
8138 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8139}
8140
8141static ssize_t
8142trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8143 loff_t *ppos)
8144{
8145 struct trace_option_dentry *topt = filp->private_data;
8146 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008147 int ret;
8148
Peter Huewe22fe9b52011-06-07 21:58:27 +02008149 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8150 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008151 return ret;
8152
Li Zefan8d18eaa2009-12-08 11:17:06 +08008153 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008154 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008155
8156 if (!!(topt->flags->val & topt->opt->bit) != val) {
8157 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008158 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008159 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008160 mutex_unlock(&trace_types_lock);
8161 if (ret)
8162 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008163 }
8164
8165 *ppos += cnt;
8166
8167 return cnt;
8168}
8169
8170
8171static const struct file_operations trace_options_fops = {
8172 .open = tracing_open_generic,
8173 .read = trace_options_read,
8174 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008175 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008176};
8177
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008178/*
8179 * In order to pass in both the trace_array descriptor as well as the index
8180 * to the flag that the trace option file represents, the trace_array
8181 * has a character array of trace_flags_index[], which holds the index
8182 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8183 * The address of this character array is passed to the flag option file
8184 * read/write callbacks.
8185 *
8186 * In order to extract both the index and the trace_array descriptor,
8187 * get_tr_index() uses the following algorithm.
8188 *
8189 * idx = *ptr;
8190 *
8191 * Dereferencing the pointer yields the index value itself, since
8192 * index[i] == i (remember index[1] == 1).
8193 *
8194 * Then, to get the trace_array descriptor, subtract that index
8195 * from the ptr to get back to the start of the index array.
8196 *
8197 * ptr - idx == &index[0]
8198 *
8199 * Then a simple container_of() from that pointer gets us to the
8200 * trace_array descriptor.
8201 */
8202static void get_tr_index(void *data, struct trace_array **ptr,
8203 unsigned int *pindex)
8204{
8205 *pindex = *(unsigned char *)data;
8206
8207 *ptr = container_of(data - *pindex, struct trace_array,
8208 trace_flags_index);
8209}
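/*
 * Worked example (illustrative): if @data points at trace_flags_index[3],
 * then *pindex == 3 and data - 3 == &trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */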
8210
Steven Rostedta8259072009-02-26 22:19:12 -05008211static ssize_t
8212trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8213 loff_t *ppos)
8214{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008215 void *tr_index = filp->private_data;
8216 struct trace_array *tr;
8217 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008218 char *buf;
8219
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008220 get_tr_index(tr_index, &tr, &index);
8221
8222 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008223 buf = "1\n";
8224 else
8225 buf = "0\n";
8226
8227 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8228}
8229
8230static ssize_t
8231trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8232 loff_t *ppos)
8233{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008234 void *tr_index = filp->private_data;
8235 struct trace_array *tr;
8236 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008237 unsigned long val;
8238 int ret;
8239
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008240 get_tr_index(tr_index, &tr, &index);
8241
Peter Huewe22fe9b52011-06-07 21:58:27 +02008242 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8243 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008244 return ret;
8245
Zhaoleif2d84b62009-08-07 18:55:48 +08008246 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008247 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008248
Prateek Sood3a53acf2019-12-10 09:15:16 +00008249 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008250 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008251 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008252 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008253 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008254
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008255 if (ret < 0)
8256 return ret;
8257
Steven Rostedta8259072009-02-26 22:19:12 -05008258 *ppos += cnt;
8259
8260 return cnt;
8261}
8262
Steven Rostedta8259072009-02-26 22:19:12 -05008263static const struct file_operations trace_options_core_fops = {
8264 .open = tracing_open_generic,
8265 .read = trace_options_core_read,
8266 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008267 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008268};
8269
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008270struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008271 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008272 struct dentry *parent,
8273 void *data,
8274 const struct file_operations *fops)
8275{
8276 struct dentry *ret;
8277
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008278 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008279 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008280 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008281
8282 return ret;
8283}
8284
8285
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008286static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008287{
8288 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008289
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008290 if (tr->options)
8291 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008292
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008293 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008294 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008295 return NULL;
8296
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008297 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008298 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008299 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008300 return NULL;
8301 }
8302
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008303 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008304}
8305
Steven Rostedt577b7852009-02-26 23:43:05 -05008306static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008307create_trace_option_file(struct trace_array *tr,
8308 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008309 struct tracer_flags *flags,
8310 struct tracer_opt *opt)
8311{
8312 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008313
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008314 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008315 if (!t_options)
8316 return;
8317
8318 topt->flags = flags;
8319 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008320 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008321
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008322 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008323 &trace_options_fops);
8324
Steven Rostedt577b7852009-02-26 23:43:05 -05008325}
8326
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008327static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008328create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008329{
8330 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008331 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008332 struct tracer_flags *flags;
8333 struct tracer_opt *opts;
8334 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008335 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008336
8337 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008338 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008339
8340 flags = tracer->flags;
8341
8342 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008343 return;
8344
8345 /*
8346 * If this is an instance, only create flags for tracers
8347 * the instance may have.
8348 */
8349 if (!trace_ok_for_array(tracer, tr))
8350 return;
8351
8352 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008353		/* Make sure there are no duplicate flags. */
8354 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008355 return;
8356 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008357
8358 opts = flags->opts;
8359
8360 for (cnt = 0; opts[cnt].name; cnt++)
8361 ;
8362
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008363 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008364 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008365 return;
8366
8367 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8368 GFP_KERNEL);
8369 if (!tr_topts) {
8370 kfree(topts);
8371 return;
8372 }
8373
8374 tr->topts = tr_topts;
8375 tr->topts[tr->nr_topts].tracer = tracer;
8376 tr->topts[tr->nr_topts].topts = topts;
8377 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008378
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008379 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008380 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008381 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008382 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008383 "Failed to create trace option: %s",
8384 opts[cnt].name);
8385 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008386}
8387
Steven Rostedta8259072009-02-26 22:19:12 -05008388static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008389create_trace_option_core_file(struct trace_array *tr,
8390 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008391{
8392 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008393
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008394 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008395 if (!t_options)
8396 return NULL;
8397
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008398 return trace_create_file(option, 0644, t_options,
8399 (void *)&tr->trace_flags_index[index],
8400 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008401}
8402
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008403static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008404{
8405 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008406 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008407 int i;
8408
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008409 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008410 if (!t_options)
8411 return;
8412
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008413 for (i = 0; trace_options[i]; i++) {
8414 if (top_level ||
8415 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8416 create_trace_option_core_file(tr, trace_options[i], i);
8417 }
Steven Rostedta8259072009-02-26 22:19:12 -05008418}
8419
Steven Rostedt499e5472012-02-22 15:50:28 -05008420static ssize_t
8421rb_simple_read(struct file *filp, char __user *ubuf,
8422 size_t cnt, loff_t *ppos)
8423{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008424 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008425 char buf[64];
8426 int r;
8427
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008428 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008429 r = sprintf(buf, "%d\n", r);
8430
8431 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8432}
8433
8434static ssize_t
8435rb_simple_write(struct file *filp, const char __user *ubuf,
8436 size_t cnt, loff_t *ppos)
8437{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008438 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008439 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008440 unsigned long val;
8441 int ret;
8442
8443 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8444 if (ret)
8445 return ret;
8446
8447 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008448 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008449 if (!!val == tracer_tracing_is_on(tr)) {
8450 val = 0; /* do nothing */
8451 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008452 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008453 if (tr->current_trace->start)
8454 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008455 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008456 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008457 if (tr->current_trace->stop)
8458 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008459 }
8460 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008461 }
8462
8463 (*ppos)++;
8464
8465 return cnt;
8466}
8467
8468static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008469 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008470 .read = rb_simple_read,
8471 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008472 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008473 .llseek = default_llseek,
8474};
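
/*
 * rb_simple_fops backs the per-instance "tracing_on" control file
 * created in init_tracer_tracefs() below. A minimal usage sketch from
 * user space, assuming tracefs is mounted at the conventional
 * /sys/kernel/tracing path:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on		# stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on		# resume recording
 *
 * Writing the value the buffer already holds is treated as a no-op by
 * rb_simple_write() above.
 */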
8475
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008476static ssize_t
8477buffer_percent_read(struct file *filp, char __user *ubuf,
8478 size_t cnt, loff_t *ppos)
8479{
8480 struct trace_array *tr = filp->private_data;
8481 char buf[64];
8482 int r;
8483
8484 r = tr->buffer_percent;
8485 r = sprintf(buf, "%d\n", r);
8486
8487 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8488}
8489
8490static ssize_t
8491buffer_percent_write(struct file *filp, const char __user *ubuf,
8492 size_t cnt, loff_t *ppos)
8493{
8494 struct trace_array *tr = filp->private_data;
8495 unsigned long val;
8496 int ret;
8497
8498 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8499 if (ret)
8500 return ret;
8501
8502 if (val > 100)
8503 return -EINVAL;
8504
8505 if (!val)
8506 val = 1;
8507
8508 tr->buffer_percent = val;
8509
8510 (*ppos)++;
8511
8512 return cnt;
8513}
8514
8515static const struct file_operations buffer_percent_fops = {
8516 .open = tracing_open_generic_tr,
8517 .read = buffer_percent_read,
8518 .write = buffer_percent_write,
8519 .release = tracing_release_generic_tr,
8520 .llseek = default_llseek,
8521};
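
/*
 * buffer_percent_fops backs the per-instance "buffer_percent" file,
 * created with a default of 50 in init_tracer_tracefs() below. The
 * stored value acts as a wake-up watermark for readers blocked on the
 * ring buffer: writes above 100 are rejected, and a write of 0 is
 * stored as 1, as buffer_percent_write() above shows. A sketch,
 * assuming the conventional tracefs mount point:
 *
 *	echo 100 > /sys/kernel/tracing/buffer_percent	# wake readers
 *							# only when full
 */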
8522
YueHaibingff585c52019-06-14 23:32:10 +08008523static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008524
8525static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008526init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008527
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008528static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008529allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008530{
8531 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008532
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008533 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008534
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008535 buf->tr = tr;
8536
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008537 buf->buffer = ring_buffer_alloc(size, rb_flags);
8538 if (!buf->buffer)
8539 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008540
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008541 buf->data = alloc_percpu(struct trace_array_cpu);
8542 if (!buf->data) {
8543 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008544 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008545 return -ENOMEM;
8546 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008547
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008548 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008549 set_buffer_entries(&tr->array_buffer,
8550 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008551
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008552 return 0;
8553}
8554
8555static int allocate_trace_buffers(struct trace_array *tr, int size)
8556{
8557 int ret;
8558
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008559 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008560 if (ret)
8561 return ret;
8562
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008563#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008564 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8565 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008566 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008567 ring_buffer_free(tr->array_buffer.buffer);
8568 tr->array_buffer.buffer = NULL;
8569 free_percpu(tr->array_buffer.data);
8570 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008571 return -ENOMEM;
8572 }
8573 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008574
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008575 /*
8576 * Only the top level trace array gets its snapshot allocated
8577 * from the kernel command line.
8578 */
8579 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008580#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04008581
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008582 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008583}
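
/*
 * Note the asymmetry above: unless "alloc_snapshot" was given on the
 * kernel command line, the max/snapshot buffer starts out at a single
 * page and is only grown to the full size when a snapshot feature is
 * first used.
 */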
8584
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008585static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008586{
8587 if (buf->buffer) {
8588 ring_buffer_free(buf->buffer);
8589 buf->buffer = NULL;
8590 free_percpu(buf->data);
8591 buf->data = NULL;
8592 }
8593}
8594
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008595static void free_trace_buffers(struct trace_array *tr)
8596{
8597 if (!tr)
8598 return;
8599
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008600 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008601
8602#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008603 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008604#endif
8605}
8606
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008607static void init_trace_flags_index(struct trace_array *tr)
8608{
8609 int i;
8610
8611 /* Used by the trace options files */
8612 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8613 tr->trace_flags_index[i] = i;
8614}
8615
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008616static void __update_tracer_options(struct trace_array *tr)
8617{
8618 struct tracer *t;
8619
8620 for (t = trace_types; t; t = t->next)
8621 add_tracer_options(tr, t);
8622}
8623
8624static void update_tracer_options(struct trace_array *tr)
8625{
8626 mutex_lock(&trace_types_lock);
8627 __update_tracer_options(tr);
8628 mutex_unlock(&trace_types_lock);
8629}
8630
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008631/* Must have trace_types_lock held */
8632struct trace_array *trace_array_find(const char *instance)
8633{
8634 struct trace_array *tr, *found = NULL;
8635
8636 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8637 if (tr->name && strcmp(tr->name, instance) == 0) {
8638 found = tr;
8639 break;
8640 }
8641 }
8642
8643 return found;
8644}
8645
8646struct trace_array *trace_array_find_get(const char *instance)
8647{
8648 struct trace_array *tr;
8649
8650 mutex_lock(&trace_types_lock);
8651 tr = trace_array_find(instance);
8652 if (tr)
8653 tr->ref++;
8654 mutex_unlock(&trace_types_lock);
8655
8656 return tr;
8657}
8658
Divya Indi28879782019-11-20 11:08:38 -08008659static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008660{
Steven Rostedt277ba042012-08-03 16:10:49 -04008661 struct trace_array *tr;
8662 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008663
Steven Rostedt277ba042012-08-03 16:10:49 -04008664 ret = -ENOMEM;
8665 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8666 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008667 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008668
8669 tr->name = kstrdup(name, GFP_KERNEL);
8670 if (!tr->name)
8671 goto out_free_tr;
8672
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008673 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8674 goto out_free_tr;
8675
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008676 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008677
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008678 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8679
Steven Rostedt277ba042012-08-03 16:10:49 -04008680 raw_spin_lock_init(&tr->start_lock);
8681
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008682 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8683
Steven Rostedt277ba042012-08-03 16:10:49 -04008684 tr->current_trace = &nop_trace;
8685
8686 INIT_LIST_HEAD(&tr->systems);
8687 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008688 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008689 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008690
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008691 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008692 goto out_free_tr;
8693
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008694 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008695 if (!tr->dir)
8696 goto out_free_tr;
8697
8698 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008699 if (ret) {
Al Viroa3d1e7e2019-11-18 09:43:10 -05008700 tracefs_remove(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008701 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008702 }
Steven Rostedt277ba042012-08-03 16:10:49 -04008703
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008704 ftrace_init_trace_array(tr);
8705
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008706 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008707 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008708 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008709
8710 list_add(&tr->list, &ftrace_trace_arrays);
8711
Divya Indi28879782019-11-20 11:08:38 -08008712 tr->ref++;
8713
Divya Indif45d1222019-03-20 11:28:51 -07008715 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008716
8717 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008718 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008719 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008720 kfree(tr->name);
8721 kfree(tr);
8722
Divya Indif45d1222019-03-20 11:28:51 -07008723 return ERR_PTR(ret);
8724}
Steven Rostedt277ba042012-08-03 16:10:49 -04008725
Divya Indif45d1222019-03-20 11:28:51 -07008726static int instance_mkdir(const char *name)
8727{
Divya Indi28879782019-11-20 11:08:38 -08008728 struct trace_array *tr;
8729 int ret;
8730
8731 mutex_lock(&event_mutex);
8732 mutex_lock(&trace_types_lock);
8733
8734 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008735 if (trace_array_find(name))
8736 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08008737
8738 tr = trace_array_create(name);
8739
8740 ret = PTR_ERR_OR_ZERO(tr);
8741
8742out_unlock:
8743 mutex_unlock(&trace_types_lock);
8744 mutex_unlock(&event_mutex);
8745 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008746}
8747
Divya Indi28879782019-11-20 11:08:38 -08008748/**
8749 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8750 * @name: The name of the trace array to be looked up/created.
8751 *
8752 * Returns a pointer to the trace array with the given name, or
8753 * NULL if it cannot be created.
8754 *
8755 * NOTE: This function increments the reference counter associated with the
8756 * trace array returned. This makes sure it cannot be freed while in use.
8757 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05008758 * If the trace_array is to be freed, trace_array_destroy() needs to
8759 * be called after the trace_array_put(), or simply let user space delete
8760 * it from the tracefs instances directory. But until the
8761 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08008762 *
8763 */
8764struct trace_array *trace_array_get_by_name(const char *name)
8765{
8766 struct trace_array *tr;
8767
8768 mutex_lock(&event_mutex);
8769 mutex_lock(&trace_types_lock);
8770
8771 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8772 if (tr->name && strcmp(tr->name, name) == 0)
8773 goto out_unlock;
8774 }
8775
8776 tr = trace_array_create(name);
8777
8778 if (IS_ERR(tr))
8779 tr = NULL;
8780out_unlock:
8781 if (tr)
8782 tr->ref++;
8783
8784 mutex_unlock(&trace_types_lock);
8785 mutex_unlock(&event_mutex);
8786 return tr;
8787}
8788EXPORT_SYMBOL_GPL(trace_array_get_by_name);
8789
Divya Indif45d1222019-03-20 11:28:51 -07008790static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008791{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008792 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008793
Divya Indi28879782019-11-20 11:08:38 -08008794 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008795 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07008796 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008797
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008798 list_del(&tr->list);
8799
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008800 /* Disable all the flags that were enabled coming in */
8801 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8802 if ((1 << i) & ZEROED_TRACE_FLAGS)
8803 set_tracer_flag(tr, 1 << i, 0);
8804 }
8805
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008806 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308807 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008808 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008809 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008810 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05008811 tracefs_remove(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008812 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008813
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008814 for (i = 0; i < tr->nr_topts; i++) {
8815 kfree(tr->topts[i].topts);
8816 }
8817 kfree(tr->topts);
8818
Chunyu Hudb9108e02017-07-20 18:36:09 +08008819 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008820 kfree(tr->name);
8821 kfree(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008822 tr = NULL;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008823
Divya Indif45d1222019-03-20 11:28:51 -07008824 return 0;
8825}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008826
Divya Indie585e642019-08-14 10:55:24 -07008827int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008828{
Divya Indie585e642019-08-14 10:55:24 -07008829 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008830 int ret;
8831
Divya Indie585e642019-08-14 10:55:24 -07008832 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008833 return -EINVAL;
8834
8835 mutex_lock(&event_mutex);
8836 mutex_lock(&trace_types_lock);
8837
Divya Indie585e642019-08-14 10:55:24 -07008838 ret = -ENODEV;
8839
8840 /* Making sure trace array exists before destroying it. */
8841 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8842 if (tr == this_tr) {
8843 ret = __remove_instance(tr);
8844 break;
8845 }
8846 }
Divya Indif45d1222019-03-20 11:28:51 -07008847
8848 mutex_unlock(&trace_types_lock);
8849 mutex_unlock(&event_mutex);
8850
8851 return ret;
8852}
8853EXPORT_SYMBOL_GPL(trace_array_destroy);
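
/*
 * A minimal sketch of how a module might pair the exported calls above;
 * the names (my_tr, example_init/example_exit) are illustrative only:
 *
 *	#include <linux/module.h>
 *	#include <linux/trace.h>
 *
 *	static struct trace_array *my_tr;
 *
 *	static int __init example_init(void)
 *	{
 *		my_tr = trace_array_get_by_name("example");
 *		return my_tr ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		trace_array_put(my_tr);		// drop our reference
 *		trace_array_destroy(my_tr);	// then remove the instance
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 *
 * The put-before-destroy ordering follows the kernel-doc for
 * trace_array_get_by_name() above.
 */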
8854
8855static int instance_rmdir(const char *name)
8856{
8857 struct trace_array *tr;
8858 int ret;
8859
8860 mutex_lock(&event_mutex);
8861 mutex_lock(&trace_types_lock);
8862
8863 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008864 tr = trace_array_find(name);
8865 if (tr)
8866 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008867
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008868 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008869 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008870
8871 return ret;
8872}
8873
Steven Rostedt277ba042012-08-03 16:10:49 -04008874static __init void create_trace_instances(struct dentry *d_tracer)
8875{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008876 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8877 instance_mkdir,
8878 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008879 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04008880 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04008881}
8882
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008883static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008884init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008885{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008886 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008887 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008888
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008889 trace_create_file("available_tracers", 0444, d_tracer,
8890 tr, &show_traces_fops);
8891
8892 trace_create_file("current_tracer", 0644, d_tracer,
8893 tr, &set_tracer_fops);
8894
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008895 trace_create_file("tracing_cpumask", 0644, d_tracer,
8896 tr, &tracing_cpumask_fops);
8897
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008898 trace_create_file("trace_options", 0644, d_tracer,
8899 tr, &tracing_iter_fops);
8900
8901 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008902 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008903
8904 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008905 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008906
8907 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008908 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008909
8910 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8911 tr, &tracing_total_entries_fops);
8912
Wang YanQing238ae932013-05-26 16:52:01 +08008913 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008914 tr, &tracing_free_buffer_fops);
8915
8916 trace_create_file("trace_marker", 0220, d_tracer,
8917 tr, &tracing_mark_fops);
8918
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008919 file = __find_event_file(tr, "ftrace", "print");
8920 if (file && file->dir)
8921 trace_create_file("trigger", 0644, file->dir, file,
8922 &event_trigger_fops);
8923 tr->trace_marker_file = file;
8924
Steven Rostedtfa32e852016-07-06 15:25:08 -04008925 trace_create_file("trace_marker_raw", 0220, d_tracer,
8926 tr, &tracing_mark_raw_fops);
8927
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008928 trace_create_file("trace_clock", 0644, d_tracer, tr,
8929 &trace_clock_fops);
8930
8931 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008932 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008933
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008934 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8935 &trace_time_stamp_mode_fops);
8936
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008937 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008938
8939 trace_create_file("buffer_percent", 0444, d_tracer,
8940 tr, &buffer_percent_fops);
8941
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008942 create_trace_options_dir(tr);
8943
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008944#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008945 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008946#endif
8947
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008948 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008949 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008950
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008951#ifdef CONFIG_TRACER_SNAPSHOT
8952 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008953 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008954#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008955
Tom Zanussi8a062902019-03-31 18:48:15 -05008956 trace_create_file("error_log", 0644, d_tracer,
8957 tr, &tracing_err_log_fops);
8958
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008959 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008960 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008961
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04008962 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008963}
8964
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008965static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008966{
8967 struct vfsmount *mnt;
8968 struct file_system_type *type;
8969
8970 /*
8971 * To maintain backward compatibility for tools that mount
8972 * debugfs to get to the tracing facility, tracefs is automatically
8973 * mounted to the debugfs/tracing directory.
8974 */
8975 type = get_fs_type("tracefs");
8976 if (!type)
8977 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008978 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008979 put_filesystem(type);
8980 if (IS_ERR(mnt))
8981 return NULL;
8982 mntget(mnt);
8983
8984 return mnt;
8985}
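
/*
 * In practice, a system with only debugfs mounted can still reach the
 * tracing files through this automount. A sketch, assuming the
 * conventional debugfs mount point:
 *
 *	mount -t debugfs nodev /sys/kernel/debug
 *	ls /sys/kernel/debug/tracing	# triggers the tracefs automount
 */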
8986
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008987/**
8988 * tracing_init_dentry - initialize top level trace array
8989 *
8990 * This is called when creating files or directories in the tracing
8991 * directory. It is called via fs_initcall() by any of the boot up code
8992 * and expects to return the dentry of the top level tracing directory.
8993 */
8994struct dentry *tracing_init_dentry(void)
8995{
8996 struct trace_array *tr = &global_trace;
8997
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05008998 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11008999 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009000 return ERR_PTR(-EPERM);
9001 }
9002
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009003 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009004 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009005 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009006
Peter Enderborg072e1332020-07-16 09:15:10 +02009007 if (WARN_ON(!tracefs_initialized()))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009008 return ERR_PTR(-ENODEV);
9009
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009010 /*
9011 * As there may still be users that expect the tracing
9012 * files to exist in debugfs/tracing, we must automount
9013 * the tracefs file system there, so older tools still
9014	 * work with the newer kernel.
9015 */
9016 tr->dir = debugfs_create_automount("tracing", NULL,
9017 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009018
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009019 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009020}
9021
Jeremy Linton00f4b652017-05-31 16:56:43 -05009022extern struct trace_eval_map *__start_ftrace_eval_maps[];
9023extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009024
Jeremy Linton5f60b352017-05-31 16:56:47 -05009025static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009026{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009027 int len;
9028
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009029 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009030 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009031}
9032
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009033#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009034static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009035{
Jeremy Linton99be6472017-05-31 16:56:44 -05009036 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009037 return;
9038
9039 /*
9040	 * Modules with bad taint do not have events created; do
9041	 * not bother with their eval (enum) maps either.
9042 */
9043 if (trace_module_has_bad_taint(mod))
9044 return;
9045
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009046 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009047}
9048
Jeremy Linton681bec02017-05-31 16:56:53 -05009049#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009050static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009051{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009052 union trace_eval_map_item *map;
9053 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009054
Jeremy Linton99be6472017-05-31 16:56:44 -05009055 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009056 return;
9057
Jeremy Linton1793ed92017-05-31 16:56:46 -05009058 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009059
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009060 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009061
9062 while (map) {
9063 if (map->head.mod == mod)
9064 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009065 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009066 last = &map->tail.next;
9067 map = map->tail.next;
9068 }
9069 if (!map)
9070 goto out;
9071
Jeremy Linton5f60b352017-05-31 16:56:47 -05009072 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009073 kfree(map);
9074 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009075 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009076}
9077#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009078static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009079#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009080
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009081static int trace_module_notify(struct notifier_block *self,
9082 unsigned long val, void *data)
9083{
9084 struct module *mod = data;
9085
9086 switch (val) {
9087 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009088 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009089 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009090 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009091 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009092 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009093 }
9094
9095 return 0;
9096}
9097
9098static struct notifier_block trace_module_nb = {
9099 .notifier_call = trace_module_notify,
9100 .priority = 0,
9101};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009102#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009103
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009104static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009105{
9106 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009107
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009108 trace_access_lock_init();
9109
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009110 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05009111 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009112 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009113
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009114 event_trace_init();
9115
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009116 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04009117 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009118
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009119 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009120 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009121
Li Zefan339ae5d2009-04-17 10:34:30 +08009122 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009123 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009124
Avadh Patel69abe6a2009-04-10 16:04:48 -04009125 trace_create_file("saved_cmdlines", 0444, d_tracer,
9126 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009127
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009128 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9129 NULL, &tracing_saved_cmdlines_size_fops);
9130
Michael Sartain99c621d2017-07-05 22:07:15 -06009131 trace_create_file("saved_tgids", 0444, d_tracer,
9132 NULL, &tracing_saved_tgids_fops);
9133
Jeremy Linton5f60b352017-05-31 16:56:47 -05009134 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009135
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009136 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009137
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009138#ifdef CONFIG_MODULES
9139 register_module_notifier(&trace_module_nb);
9140#endif
9141
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009142#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009143 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009144 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009145#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009146
Steven Rostedt277ba042012-08-03 16:10:49 -04009147 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009148
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009149 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009150
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009151 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009152}
9153
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009154static int trace_panic_handler(struct notifier_block *this,
9155 unsigned long event, void *unused)
9156{
Steven Rostedt944ac422008-10-23 19:26:08 -04009157 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009158 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009159 return NOTIFY_OK;
9160}
9161
9162static struct notifier_block trace_panic_notifier = {
9163 .notifier_call = trace_panic_handler,
9164 .next = NULL,
9165 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9166};
9167
9168static int trace_die_handler(struct notifier_block *self,
9169 unsigned long val,
9170 void *data)
9171{
9172 switch (val) {
9173 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009174 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009175 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009176 break;
9177 default:
9178 break;
9179 }
9180 return NOTIFY_OK;
9181}
9182
9183static struct notifier_block trace_die_notifier = {
9184 .notifier_call = trace_die_handler,
9185 .priority = 200
9186};
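
/*
 * Both notifiers honor ftrace_dump_on_oops, which is typically enabled
 * with the "ftrace_dump_on_oops" kernel command line option or the
 * sysctl of the same name; "ftrace_dump_on_oops=orig_cpu" selects the
 * DUMP_ORIG mode handled in ftrace_dump() below.
 */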
9187
9188/*
9189 * printk is set to a max of 1024; we really don't need it that big.
9190 * Nothing should be printing 1000 characters anyway.
9191 */
9192#define TRACE_MAX_PRINT 1000
9193
9194/*
9195 * Define KERN_TRACE here so that we have one place to modify
9196 * it if we decide to change what log level the ftrace dump
9197 * should be at.
9198 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009199#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009200
Jason Wessel955b61e2010-08-05 09:22:23 -05009201void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009202trace_printk_seq(struct trace_seq *s)
9203{
9204 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009205 if (s->seq.len >= TRACE_MAX_PRINT)
9206 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009207
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009208 /*
9209 * More paranoid code. Although the buffer size is set to
9210 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9211 * an extra layer of protection.
9212 */
9213 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9214 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009215
9216 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009217 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009218
9219 printk(KERN_TRACE "%s", s->buffer);
9220
Steven Rostedtf9520752009-03-02 14:04:40 -05009221 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009222}
9223
Jason Wessel955b61e2010-08-05 09:22:23 -05009224void trace_init_global_iter(struct trace_iterator *iter)
9225{
9226 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009227 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009228 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009229 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009230
9231 if (iter->trace && iter->trace->open)
9232 iter->trace->open(iter);
9233
9234 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009235 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009236 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9237
9238 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9239 if (trace_clocks[iter->tr->clock_id].in_ns)
9240 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009241}
9242
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009243void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009244{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009245 /* use static because iter can be a bit big for the stack */
9246 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009247 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009248 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009249 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009250 unsigned long flags;
9251 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009252
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009253 /* Only allow one dump user at a time. */
9254 if (atomic_inc_return(&dump_running) != 1) {
9255 atomic_dec(&dump_running);
9256 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009257 }
9258
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009259 /*
9260 * Always turn off tracing when we dump.
9261 * We don't need to show trace output of what happens
9262 * between multiple crashes.
9263 *
9264 * If the user does a sysrq-z, then they can re-enable
9265 * tracing with echo 1 > tracing_on.
9266 */
9267 tracing_off();
9268
9269 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009270 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009271
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009272 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009273 trace_init_global_iter(&iter);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009274 /* Can not use kmalloc for iter.temp */
9275 iter.temp = static_temp_buf;
9276 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009277
Steven Rostedtd7690412008-10-01 00:29:53 -04009278 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009279 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009280 }
9281
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009282 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009283
Török Edwinb54d3de2008-11-22 13:28:48 +02009284 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009285 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009286
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009287 switch (oops_dump_mode) {
9288 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009289 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009290 break;
9291 case DUMP_ORIG:
9292 iter.cpu_file = raw_smp_processor_id();
9293 break;
9294 case DUMP_NONE:
9295 goto out_enable;
9296 default:
9297 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009298 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009299 }
9300
9301 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009302
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009303 /* Did function tracer already get disabled? */
9304 if (ftrace_is_dead()) {
9305 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9306 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9307 }
9308
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009309 /*
9310	 * We need to stop all tracing on all CPUs to read
9311	 * the next buffer. This is a bit expensive, but is
9312	 * not done often. We fill all that we can read,
9313 * and then release the locks again.
9314 */
9315
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009316 while (!trace_empty(&iter)) {
9317
9318 if (!cnt)
9319 printk(KERN_TRACE "---------------------------------\n");
9320
9321 cnt++;
9322
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009323 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009324 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009325
Jason Wessel955b61e2010-08-05 09:22:23 -05009326 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009327 int ret;
9328
9329 ret = print_trace_line(&iter);
9330 if (ret != TRACE_TYPE_NO_CONSUME)
9331 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009332 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009333 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009334
9335 trace_printk_seq(&iter.seq);
9336 }
9337
9338 if (!cnt)
9339 printk(KERN_TRACE " (ftrace buffer empty)\n");
9340 else
9341 printk(KERN_TRACE "---------------------------------\n");
9342
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009343 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009344 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009345
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009346 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009347 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009348 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009349 atomic_dec(&dump_running);
9350 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009351 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009352}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009353EXPORT_SYMBOL_GPL(ftrace_dump);
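
/*
 * A quick way to exercise this path by hand, assuming CONFIG_MAGIC_SYSRQ
 * and a kernel that allows sysrq via /proc:
 *
 *	echo 1 > /proc/sys/kernel/sysrq
 *	echo z > /proc/sysrq-trigger	# sysrq-z dumps the ftrace buffer
 */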
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009354
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009355int trace_run_command(const char *buf, int (*createfn)(int, char **))
9356{
9357 char **argv;
9358 int argc, ret;
9359
9360 argc = 0;
9361 ret = 0;
9362 argv = argv_split(GFP_KERNEL, buf, &argc);
9363 if (!argv)
9364 return -ENOMEM;
9365
9366 if (argc)
9367 ret = createfn(argc, argv);
9368
9369 argv_free(argv);
9370
9371 return ret;
9372}
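
/*
 * trace_run_command() only tokenizes; createfn interprets the words.
 * For example, a (hypothetical) dynamic event definition such as
 *
 *	p:myprobe do_sys_open
 *
 * would be split into argc = 2, argv = { "p:myprobe", "do_sys_open" }
 * before createfn(argc, argv) is invoked.
 */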
9373
9374#define WRITE_BUFSIZE 4096
9375
9376ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9377 size_t count, loff_t *ppos,
9378 int (*createfn)(int, char **))
9379{
9380 char *kbuf, *buf, *tmp;
9381 int ret = 0;
9382 size_t done = 0;
9383 size_t size;
9384
9385 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9386 if (!kbuf)
9387 return -ENOMEM;
9388
9389 while (done < count) {
9390 size = count - done;
9391
9392 if (size >= WRITE_BUFSIZE)
9393 size = WRITE_BUFSIZE - 1;
9394
9395 if (copy_from_user(kbuf, buffer + done, size)) {
9396 ret = -EFAULT;
9397 goto out;
9398 }
9399 kbuf[size] = '\0';
9400 buf = kbuf;
9401 do {
9402 tmp = strchr(buf, '\n');
9403 if (tmp) {
9404 *tmp = '\0';
9405 size = tmp - buf + 1;
9406 } else {
9407 size = strlen(buf);
9408 if (done + size < count) {
9409 if (buf != kbuf)
9410 break;
9411 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9412 pr_warn("Line length is too long: Should be less than %d\n",
9413 WRITE_BUFSIZE - 2);
9414 ret = -EINVAL;
9415 goto out;
9416 }
9417 }
9418 done += size;
9419
9420 /* Remove comments */
9421 tmp = strchr(buf, '#');
9422
9423 if (tmp)
9424 *tmp = '\0';
9425
9426 ret = trace_run_command(buf, createfn);
9427 if (ret)
9428 goto out;
9429 buf += size;
9430
9431 } while (done < count);
9432 }
9433 ret = done;
9434
9435out:
9436 kfree(kbuf);
9437
9438 return ret;
9439}
9440
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009441__init static int tracer_alloc_buffers(void)
9442{
Steven Rostedt73c51622009-03-11 13:42:01 -04009443 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309444 int ret = -ENOMEM;
9445
9447 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009448 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009449 return -EPERM;
9450 }
9451
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009452 /*
9453	 * Make sure we don't accidentally add more trace options
9454 * than we have bits for.
9455 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009456 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009457
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309458 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9459 goto out;
9460
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009461 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309462 goto out_free_buffer_mask;
9463
Steven Rostedt07d777f2011-09-22 14:01:55 -04009464 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009465 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009466 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009467 trace_printk_init_buffers();
9468
Steven Rostedt73c51622009-03-11 13:42:01 -04009469 /* To save memory, keep the ring buffer size to its minimum */
9470 if (ring_buffer_expanded)
9471 ring_buf_size = trace_buf_size;
9472 else
9473 ring_buf_size = 1;
9474
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309475 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009476 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009477
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009478 raw_spin_lock_init(&global_trace.start_lock);
9479
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009480 /*
9481	 * The prepare callbacks allocate some memory for the ring buffer. We
9482	 * don't free the buffer if the CPU goes down. If we were to free
9483 * the buffer, then the user would lose any trace that was in the
9484 * buffer. The memory will be removed once the "instance" is removed.
9485 */
9486 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9487 "trace/RB:preapre", trace_rb_cpu_prepare,
9488 NULL);
9489 if (ret < 0)
9490 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009491 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009492 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009493 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9494 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009495 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009496
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009497 if (trace_create_savedcmd() < 0)
9498 goto out_free_temp_buffer;
9499
Steven Rostedtab464282008-05-12 21:21:00 +02009500 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009501 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009502 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009503 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009504 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009505
Steven Rostedt499e5472012-02-22 15:50:28 -05009506 if (global_trace.buffer_disabled)
9507 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009508
Steven Rostedte1e232c2014-02-10 23:38:46 -05009509 if (trace_boot_clock) {
9510 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9511 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009512 pr_warn("Trace clock %s not defined, going back to default\n",
9513 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009514 }
9515
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009516 /*
9517 * register_tracer() might reference current_trace, so it
9518 * needs to be set before we register anything. This is
9519 * just a bootstrap of current_trace anyway.
9520 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009521 global_trace.current_trace = &nop_trace;
9522
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009523 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9524
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009525 ftrace_init_global_array_ops(&global_trace);
9526
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009527 init_trace_flags_index(&global_trace);
9528
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009529 register_tracer(&nop_trace);
9530
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009531 /* Function tracing may start here (via kernel command line) */
9532 init_function_trace();
9533
Steven Rostedt60a11772008-05-12 21:20:44 +02009534 /* All seems OK, enable tracing */
9535 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009536
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009537 atomic_notifier_chain_register(&panic_notifier_list,
9538 &trace_panic_notifier);
9539
9540 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009541
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009542 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9543
9544 INIT_LIST_HEAD(&global_trace.systems);
9545 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009546 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009547 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009548 list_add(&global_trace.list, &ftrace_trace_arrays);
9549
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08009550 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04009551
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04009552 register_snapshot_cmd();
9553
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009554 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009555
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009556out_free_savedcmd:
9557 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009558out_free_temp_buffer:
9559 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009560out_rm_hp_state:
9561 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309562out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009563 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309564out_free_buffer_mask:
9565 free_cpumask_var(tracing_buffer_mask);
9566out:
9567 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009568}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009569
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009570void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009571{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009572 if (tracepoint_printk) {
9573 tracepoint_print_iter =
9574 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009575 if (MEM_FAIL(!tracepoint_print_iter,
9576 "Failed to allocate trace iterator\n"))
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009577 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05009578 else
9579 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009580 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009581 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009582}
9583
9584void __init trace_init(void)
9585{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009586 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009587}
9588
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009589__init static int clear_boot_tracer(void)
9590{
9591 /*
9592 * The default tracer at boot buffer is an init section.
9593 * This function is called in lateinit. If we did not
9594 * find the boot tracer, then clear it out, to prevent
9595 * later registration from accessing the buffer that is
9596 * about to be freed.
9597 */
9598 if (!default_bootup_tracer)
9599 return 0;
9600
9601 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9602 default_bootup_tracer);
9603 default_bootup_tracer = NULL;
9604
9605 return 0;
9606}
9607
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009608fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04009609late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01009610
9611#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9612__init static int tracing_set_default_clock(void)
9613{
9614 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01009615 if (!trace_boot_clock && !sched_clock_stable()) {
Masami Ichikawabf24daa2020-01-16 22:12:36 +09009616 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9617 pr_warn("Can not set tracing clock due to lockdown\n");
9618 return -EPERM;
9619 }
9620
Chris Wilson3fd49c92018-03-30 16:01:31 +01009621 printk(KERN_WARNING
9622 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9623 "If you want to keep using the local clock, then add:\n"
9624 " \"trace_clock=local\"\n"
9625 "on the kernel command line\n");
9626 tracing_set_clock(&global_trace, "global");
9627 }
9628
9629 return 0;
9630}
9631late_initcall_sync(tracing_set_default_clock);
9632#endif