// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will poke into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
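
/*
 * Example kernel command line usage, matching the parser above:
 *
 *	ftrace_dump_on_oops		dump every CPU's buffer on an oops
 *	ftrace_dump_on_oops=orig_cpu	dump only the buffer of the CPU
 *					that triggered the oops
 */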

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
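
/*
 * For example: ns2usecs(1499) == 1 while ns2usecs(1500) == 2; adding
 * 500 before the divide rounds to the nearest microsecond.
 */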

static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are adding export to the list, but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
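
/*
 * Example use of the export API (a minimal sketch; my_export and
 * my_export_write are hypothetical names, while register_ftrace_export(),
 * unregister_ftrace_export() and struct trace_export are the real API
 * used above):
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		... send the raw binary trace entry to an external sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_export_write,
 *		.flags = TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */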

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
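
/*
 * A sketch of how callers typically drive this (the event pid filter
 * hooks the sched_process_fork and sched_process_exit tracepoints in
 * roughly this way; "parent", "child" and "task" are illustrative):
 *
 *	on fork:	trace_filter_add_remove_task(pid_list, parent, child);
 *	on exit:	trace_filter_add_remove_task(pid_list, NULL, task);
 *
 * so children of a filtered task inherit the filter, and exiting tasks
 * are dropped from it.
 */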

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
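
/*
 * The three helpers above are designed to slot into a seq_operations
 * table. A sketch (my_pid_start/my_pid_next/my_pid_stop are hypothetical
 * wrappers that locate the right trace_pid_list and then call
 * trace_pid_start()/trace_pid_next(); trace_pid_show() can be used
 * directly):
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,
 *		.next	= my_pid_next,
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */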

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
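
/*
 * From user space this parses writes such as (example only):
 *
 *	# echo "123 456 789" > set_event_pid
 *
 * i.e. whitespace-separated decimal pids. An empty write clears the
 * filter: nr_pids stays zero, so *new_pid_list is returned as NULL.
 */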

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
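
/*
 * A sketch of the intended calling pattern: a reader of one cpu's
 * buffer brackets the access with the per-cpu lock, while a whole
 * buffer operation passes RING_BUFFER_ALL_CPUS for exclusive access:
 *
 *	trace_access_lock(cpu);
 *	... consume or peek at events of that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 */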

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
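
/*
 * Neither function above is normally called directly; the trace_puts()
 * macro (see linux/kernel.h) supplies the caller's ip and picks
 * __trace_bputs() for build-time constant strings and __trace_puts()
 * otherwise, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */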

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
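
/*
 * Example (a sketch; rare_condition stands in for whatever event the
 * caller wants to capture): allocate the spare buffer once from
 * sleepable context, then freeze the trace around the condition and
 * read it back from the tracefs "snapshot" file.
 *
 *	tracing_alloc_snapshot();
 *	...
 *	if (rare_condition)
 *		tracing_snapshot();
 */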

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already taken.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
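
/*
 * Example (a sketch; struct my_ctx, my_ctx and my_update are
 * hypothetical): snapshot only when a watched value crosses a
 * threshold.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_ctx *ctx = cond_data;
 *
 *		return ctx->value > ctx->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_ctx, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_ctx);	takes the snapshot only
 *						if my_update() returns true
 */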
1339
1340/**
1341 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1342 * @tr: The tracing instance
1343 *
1344 * Check whether the conditional snapshot for the given instance is
1345 * enabled; if so, free the cond_snapshot associated with it,
1346 * otherwise return -EINVAL.
1347 *
1348 * Returns 0 if successful, error otherwise.
1349 */
1350int tracing_snapshot_cond_disable(struct trace_array *tr)
1351{
1352 int ret = 0;
1353
1354 arch_spin_lock(&tr->max_lock);
1355
1356 if (!tr->cond_snapshot)
1357 ret = -EINVAL;
1358 else {
1359 kfree(tr->cond_snapshot);
1360 tr->cond_snapshot = NULL;
1361 }
1362
1363 arch_spin_unlock(&tr->max_lock);
1364
1365 return ret;
1366}
1367EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
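/*
 * Example sketch of the conditional-snapshot API above; my_update(),
 * my_arm_snapshot() and the threshold value are illustrative, not
 * kernel API. Note the update callback is invoked under tr->max_lock,
 * so it must not sleep.
 */
static bool my_update(struct trace_array *tr, void *cond_data)
{
	/* Take the snapshot only once the saved threshold is crossed */
	return *(unsigned long *)cond_data > 1000;
}

static int my_arm_snapshot(struct trace_array *tr, unsigned long *val)
{
	int ret;

	ret = tracing_snapshot_cond_enable(tr, val, my_update);
	if (ret)
		return ret;	/* e.g. -EBUSY if one is already armed */

	/* At the point of interest; snapshots only if my_update() agrees */
	tracing_snapshot_cond(tr, val);

	/* Tear down when finished */
	return tracing_snapshot_cond_disable(tr);
}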
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001368#else
1369void tracing_snapshot(void)
1370{
1371 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1372}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001373EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001374void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1375{
1376 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1377}
1378EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001379int tracing_alloc_snapshot(void)
1380{
1381 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1382 return -ENODEV;
1383}
1384EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001385void tracing_snapshot_alloc(void)
1386{
1387 /* Give warning */
1388 tracing_snapshot();
1389}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001390EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001391void *tracing_cond_snapshot_data(struct trace_array *tr)
1392{
1393 return NULL;
1394}
1395EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1396int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1397{
1398 return -ENODEV;
1399}
1400EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1401int tracing_snapshot_cond_disable(struct trace_array *tr)
1402{
 1403	return -ENODEV;
1404}
1405EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001406#endif /* CONFIG_TRACER_SNAPSHOT */
1407
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001408void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001409{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001410 if (tr->array_buffer.buffer)
1411 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001412 /*
1413 * This flag is looked at when buffers haven't been allocated
1414 * yet, or by some tracers (like irqsoff), that just want to
1415 * know if the ring buffer has been disabled, but it can handle
 1416 * races where it gets disabled while we still do a record.
1417 * As the check is in the fast path of the tracers, it is more
1418 * important to be fast than accurate.
1419 */
1420 tr->buffer_disabled = 1;
1421 /* Make the flag seen by readers */
1422 smp_wmb();
1423}
1424
Steven Rostedt499e5472012-02-22 15:50:28 -05001425/**
1426 * tracing_off - turn off tracing buffers
1427 *
1428 * This function stops the tracing buffers from recording data.
1429 * It does not disable any overhead the tracers themselves may
1430 * be causing. This function simply causes all recording to
1431 * the ring buffers to fail.
1432 */
1433void tracing_off(void)
1434{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001435 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001436}
1437EXPORT_SYMBOL_GPL(tracing_off);
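/*
 * Example sketch: freezing the ring buffers when an anomaly is seen,
 * so the events leading up to it are preserved; this mirrors what
 * disable_trace_on_warning() does below. my_check() is illustrative.
 */
static void my_check(bool anomaly)
{
	if (anomaly)
		tracing_off();	/* recording stops; buffers stay readable */
}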
1438
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001439void disable_trace_on_warning(void)
1440{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001441 if (__disable_trace_on_warning) {
1442 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1443 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001444 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001445 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001446}
1447
Steven Rostedt499e5472012-02-22 15:50:28 -05001448/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001449 * tracer_tracing_is_on - show the real state of the ring buffer
 1450 * @tr: the trace array to check
 1451 *
 1452 * Shows the real state of the ring buffer: whether recording is enabled or not.
1453 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001454bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001455{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001456 if (tr->array_buffer.buffer)
1457 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001458 return !tr->buffer_disabled;
1459}
1460
Steven Rostedt499e5472012-02-22 15:50:28 -05001461/**
1462 * tracing_is_on - show state of ring buffers enabled
1463 */
1464int tracing_is_on(void)
1465{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001466 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001467}
1468EXPORT_SYMBOL_GPL(tracing_is_on);
1469
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001470static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001471{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001472 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001473
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001474 if (!str)
1475 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001476 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001477 /* buf_size cannot be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001478 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001479 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001480 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481 return 1;
1482}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001483__setup("trace_buf_size=", set_buf_size);
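/*
 * Because memparse() is used above, the boot parameter accepts the
 * usual size suffixes; for example, on the kernel command line:
 *
 *	trace_buf_size=1048576
 *	trace_buf_size=4M
 *
 * Either form sets the per-CPU ring buffer size in bytes.
 */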
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484
Tim Bird0e950172010-02-25 15:36:43 -08001485static int __init set_tracing_thresh(char *str)
1486{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001487 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001488 int ret;
1489
1490 if (!str)
1491 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001492 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001493 if (ret < 0)
1494 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001495 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001496 return 1;
1497}
1498__setup("tracing_thresh=", set_tracing_thresh);
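/*
 * The threshold is given in microseconds on the command line and
 * stored in nanoseconds (hence the "* 1000" above); for example:
 *
 *	tracing_thresh=100
 *
 * makes the latency tracers record only latencies above 100 usecs.
 */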
1499
Steven Rostedt57f50be2008-05-12 21:20:44 +02001500unsigned long nsecs_to_usecs(unsigned long nsecs)
1501{
1502 return nsecs / 1000;
1503}
1504
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001505/*
1506 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001507 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001508 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001509 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001510 */
1511#undef C
1512#define C(a, b) b
1513
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001514/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001516 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517 NULL
1518};
1519
Zhaolei5079f322009-08-25 16:12:56 +08001520static struct {
1521 u64 (*func)(void);
1522 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001523 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001524} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001525 { trace_clock_local, "local", 1 },
1526 { trace_clock_global, "global", 1 },
1527 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001528 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001529 { trace_clock, "perf", 1 },
1530 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001531 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001532 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001533 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001534};
1535
Tom Zanussi860f9f62018-01-15 20:51:48 -06001536bool trace_clock_in_ns(struct trace_array *tr)
1537{
1538 if (trace_clocks[tr->clock_id].in_ns)
1539 return true;
1540
1541 return false;
1542}
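/*
 * The clocks in the table above are selected at run time through
 * tracefs, e.g.:
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *
 * trace_clock_in_ns() then tells callers whether timestamps from the
 * selected clock are in nanoseconds ("counter" and "uptime" are not).
 */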
1543
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001544/*
1545 * trace_parser_get_init - gets the buffer for trace parser
1546 */
1547int trace_parser_get_init(struct trace_parser *parser, int size)
1548{
1549 memset(parser, 0, sizeof(*parser));
1550
1551 parser->buffer = kmalloc(size, GFP_KERNEL);
1552 if (!parser->buffer)
1553 return 1;
1554
1555 parser->size = size;
1556 return 0;
1557}
1558
1559/*
1560 * trace_parser_put - frees the buffer for trace parser
1561 */
1562void trace_parser_put(struct trace_parser *parser)
1563{
1564 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001565 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001566}
1567
1568/*
1569 * trace_get_user - reads the user input string separated by space
1570 * (matched by isspace(ch))
1571 *
1572 * For each string found the 'struct trace_parser' is updated,
1573 * and the function returns.
1574 *
1575 * Returns number of bytes read.
1576 *
1577 * See kernel/trace/trace.h for 'struct trace_parser' details.
1578 */
1579int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1580 size_t cnt, loff_t *ppos)
1581{
1582 char ch;
1583 size_t read = 0;
1584 ssize_t ret;
1585
1586 if (!*ppos)
1587 trace_parser_clear(parser);
1588
1589 ret = get_user(ch, ubuf++);
1590 if (ret)
1591 goto out;
1592
1593 read++;
1594 cnt--;
1595
1596 /*
 1597 * If the parser finished with the last write, skip the leading
 1598 * whitespace; otherwise continue the previous token without skipping.
1599 */
1600 if (!parser->cont) {
1601 /* skip white space */
1602 while (cnt && isspace(ch)) {
1603 ret = get_user(ch, ubuf++);
1604 if (ret)
1605 goto out;
1606 read++;
1607 cnt--;
1608 }
1609
Changbin Du76638d92018-01-16 17:02:29 +08001610 parser->idx = 0;
1611
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001612 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001613 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001614 *ppos += read;
1615 ret = read;
1616 goto out;
1617 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001618 }
1619
1620 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001621 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001622 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 parser->buffer[parser->idx++] = ch;
1624 else {
1625 ret = -EINVAL;
1626 goto out;
1627 }
1628 ret = get_user(ch, ubuf++);
1629 if (ret)
1630 goto out;
1631 read++;
1632 cnt--;
1633 }
1634
1635 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001636 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001637 parser->buffer[parser->idx] = 0;
1638 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001639 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001640 parser->cont = true;
1641 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001642 /* Make sure the parsed string always terminates with '\0'. */
1643 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001644 } else {
1645 ret = -EINVAL;
1646 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001647 }
1648
1649 *ppos += read;
1650 ret = read;
1651
1652out:
1653 return ret;
1654}
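/*
 * Example sketch of the trace_parser life cycle in a tracefs write
 * handler; my_apply_token() is illustrative, the rest is the API
 * above (trace_parser_loaded() comes from trace.h).
 */
static ssize_t my_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret > 0 && trace_parser_loaded(&parser))
		my_apply_token(parser.buffer);	/* consume one token */

	trace_parser_put(&parser);
	return ret;
}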
1655
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001656/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001657static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001658{
1659 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001660
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001661 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001662 return -EBUSY;
1663
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001664 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001665 if (cnt > len)
1666 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001667 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001668
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001669 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001670 return cnt;
1671}
1672
Tim Bird0e950172010-02-25 15:36:43 -08001673unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001674static const struct file_operations tracing_max_lat_fops;
1675
1676#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1677 defined(CONFIG_FSNOTIFY)
1678
1679static struct workqueue_struct *fsnotify_wq;
1680
1681static void latency_fsnotify_workfn(struct work_struct *work)
1682{
1683 struct trace_array *tr = container_of(work, struct trace_array,
1684 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001685 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001686}
1687
1688static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1689{
1690 struct trace_array *tr = container_of(iwork, struct trace_array,
1691 fsnotify_irqwork);
1692 queue_work(fsnotify_wq, &tr->fsnotify_work);
1693}
1694
1695static void trace_create_maxlat_file(struct trace_array *tr,
1696 struct dentry *d_tracer)
1697{
1698 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1699 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1700 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1701 d_tracer, &tr->max_latency,
1702 &tracing_max_lat_fops);
1703}
1704
1705__init static int latency_fsnotify_init(void)
1706{
1707 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1708 WQ_UNBOUND | WQ_HIGHPRI, 0);
1709 if (!fsnotify_wq) {
1710 pr_err("Unable to allocate tr_max_lat_wq\n");
1711 return -ENOMEM;
1712 }
1713 return 0;
1714}
1715
1716late_initcall_sync(latency_fsnotify_init);
1717
1718void latency_fsnotify(struct trace_array *tr)
1719{
1720 if (!fsnotify_wq)
1721 return;
1722 /*
1723 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1724 * possible that we are called from __schedule() or do_idle(), which
1725 * could cause a deadlock.
1726 */
1727 irq_work_queue(&tr->fsnotify_irqwork);
1728}
1729
1730/*
1731 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1732 * defined(CONFIG_FSNOTIFY)
1733 */
1734#else
1735
1736#define trace_create_maxlat_file(tr, d_tracer) \
1737 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1738 &tr->max_latency, &tracing_max_lat_fops)
1739
1740#endif
Tim Bird0e950172010-02-25 15:36:43 -08001741
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001742#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001743/*
1744 * Copy the new maximum trace into the separate maximum-trace
1745 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001746 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001747 */
1748static void
1749__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1750{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001751 struct array_buffer *trace_buf = &tr->array_buffer;
1752 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001753 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1754 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001755
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001756 max_buf->cpu = cpu;
1757 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001758
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001759 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001760 max_data->critical_start = data->critical_start;
1761 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001762
Tom Zanussi85f726a2019-03-05 10:12:00 -06001763 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001764 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001765 /*
1766 * If tsk == current, then use current_uid(), as that does not use
1767 * RCU. The irq tracer can be called out of RCU scope.
1768 */
1769 if (tsk == current)
1770 max_data->uid = current_uid();
1771 else
1772 max_data->uid = task_uid(tsk);
1773
Steven Rostedt8248ac02009-09-02 12:27:41 -04001774 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1775 max_data->policy = tsk->policy;
1776 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001777
1778 /* record this tasks comm */
1779 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001780 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001781}
1782
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001783/**
1784 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 1785 * @tr: the trace array to update
1786 * @tsk: the task with the latency
1787 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001788 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001789 *
1790 * Flip the buffers between the @tr and the max_tr and record information
1791 * about which task was the cause of this latency.
1792 */
Ingo Molnare309b412008-05-12 21:20:51 +02001793void
Tom Zanussia35873a2019-02-13 17:42:45 -06001794update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1795 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001796{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001797 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001798 return;
1799
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001800 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001801
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001802 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001803 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001804 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001805 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001806 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001807
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001808 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001809
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001810 /* Inherit the recordable setting from array_buffer */
1811 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001812 ring_buffer_record_on(tr->max_buffer.buffer);
1813 else
1814 ring_buffer_record_off(tr->max_buffer.buffer);
1815
Tom Zanussia35873a2019-02-13 17:42:45 -06001816#ifdef CONFIG_TRACER_SNAPSHOT
1817 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1818 goto out_unlock;
1819#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001820 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001821
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001822 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001823
1824 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001825 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001826}
1827
1828/**
1829 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001830 * @tr: the trace array to update
1831 * @tsk: task with the latency
1832 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001833 *
1834 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001835 */
Ingo Molnare309b412008-05-12 21:20:51 +02001836void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001837update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1838{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001839 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001840
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001841 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001842 return;
1843
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001844 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001845 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001846 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001847 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001848 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001849 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001850
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001851 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001852
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001853 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001854
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001855 if (ret == -EBUSY) {
1856 /*
1857 * We failed to swap the buffer due to a commit taking
1858 * place on this CPU. We fail to record, but we reset
1859 * the max trace buffer (no one writes directly to it)
1860 * and flag that it failed.
1861 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001862 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001863 "Failed to swap buffers due to commit in progress\n");
1864 }
1865
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001866 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001867
1868 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001869 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001870}
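/*
 * Example sketch: how a latency tracer consumes the helper above.
 * Called with interrupts disabled, in the style of the irqsoff
 * tracers; my_report_latency() is illustrative.
 */
static inline void my_report_latency(struct trace_array *tr, int cpu,
				     unsigned long delta)
{
	if (delta <= tr->max_latency)
		return;

	tr->max_latency = delta;
	update_max_tr_single(tr, current, cpu);
}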
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001871#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001872
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001873static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001874{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001875 /* Iterators are static, they should be filled or empty */
1876 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001877 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001878
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001879 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001880 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001881}
1882
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001883#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001884static bool selftests_can_run;
1885
1886struct trace_selftests {
1887 struct list_head list;
1888 struct tracer *type;
1889};
1890
1891static LIST_HEAD(postponed_selftests);
1892
1893static int save_selftest(struct tracer *type)
1894{
1895 struct trace_selftests *selftest;
1896
1897 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1898 if (!selftest)
1899 return -ENOMEM;
1900
1901 selftest->type = type;
1902 list_add(&selftest->list, &postponed_selftests);
1903 return 0;
1904}
1905
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001906static int run_tracer_selftest(struct tracer *type)
1907{
1908 struct trace_array *tr = &global_trace;
1909 struct tracer *saved_tracer = tr->current_trace;
1910 int ret;
1911
1912 if (!type->selftest || tracing_selftest_disabled)
1913 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001914
1915 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001916 * If a tracer registers early in boot up (before scheduling is
1917 * initialized and such), then do not run its selftests yet.
1918 * Instead, run it a little later in the boot process.
1919 */
1920 if (!selftests_can_run)
1921 return save_selftest(type);
1922
1923 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001924 * Run a selftest on this tracer.
1925 * Here we reset the trace buffer, and set the current
1926 * tracer to be this tracer. The tracer can then run some
1927 * internal tracing to verify that everything is in order.
1928 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001929 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001930 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001931
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001932 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001933
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001934#ifdef CONFIG_TRACER_MAX_TRACE
1935 if (type->use_max_tr) {
1936 /* If we expanded the buffers, make sure the max is expanded too */
1937 if (ring_buffer_expanded)
1938 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1939 RING_BUFFER_ALL_CPUS);
1940 tr->allocated_snapshot = true;
1941 }
1942#endif
1943
1944 /* the test is responsible for initializing and enabling */
1945 pr_info("Testing tracer %s: ", type->name);
1946 ret = type->selftest(type, tr);
1947 /* the test is responsible for resetting too */
1948 tr->current_trace = saved_tracer;
1949 if (ret) {
1950 printk(KERN_CONT "FAILED!\n");
1951 /* Add the warning after printing 'FAILED' */
1952 WARN_ON(1);
1953 return -1;
1954 }
1955 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001956 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001957
1958#ifdef CONFIG_TRACER_MAX_TRACE
1959 if (type->use_max_tr) {
1960 tr->allocated_snapshot = false;
1961
1962 /* Shrink the max buffer again */
1963 if (ring_buffer_expanded)
1964 ring_buffer_resize(tr->max_buffer.buffer, 1,
1965 RING_BUFFER_ALL_CPUS);
1966 }
1967#endif
1968
1969 printk(KERN_CONT "PASSED\n");
1970 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001971}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001972
1973static __init int init_trace_selftests(void)
1974{
1975 struct trace_selftests *p, *n;
1976 struct tracer *t, **last;
1977 int ret;
1978
1979 selftests_can_run = true;
1980
1981 mutex_lock(&trace_types_lock);
1982
1983 if (list_empty(&postponed_selftests))
1984 goto out;
1985
1986 pr_info("Running postponed tracer tests:\n");
1987
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001988 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001989 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001990 /* This loop can take minutes when sanitizers are enabled, so
 1991 * let's make sure we allow RCU processing.
1992 */
1993 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001994 ret = run_tracer_selftest(p->type);
1995 /* If the test fails, then warn and remove from available_tracers */
1996 if (ret < 0) {
1997 WARN(1, "tracer: %s failed selftest, disabling\n",
1998 p->type->name);
1999 last = &trace_types;
2000 for (t = trace_types; t; t = t->next) {
2001 if (t == p->type) {
2002 *last = t->next;
2003 break;
2004 }
2005 last = &t->next;
2006 }
2007 }
2008 list_del(&p->list);
2009 kfree(p);
2010 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002011 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002012
2013 out:
2014 mutex_unlock(&trace_types_lock);
2015
2016 return 0;
2017}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002018core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002019#else
2020static inline int run_tracer_selftest(struct tracer *type)
2021{
2022 return 0;
2023}
2024#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002025
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002026static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2027
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002028static void __init apply_trace_boot_options(void);
2029
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002030/**
2031 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002032 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002033 *
2034 * Register a new plugin tracer.
2035 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002036int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002037{
2038 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002039 int ret = 0;
2040
2041 if (!type->name) {
2042 pr_info("Tracer must have a name\n");
2043 return -1;
2044 }
2045
Dan Carpenter24a461d2010-07-10 12:06:44 +02002046 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002047 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2048 return -1;
2049 }
2050
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002051 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002052 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002053 type->name);
2054 return -EPERM;
2055 }
2056
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002057 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002058
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002059 tracing_selftest_running = true;
2060
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002061 for (t = trace_types; t; t = t->next) {
2062 if (strcmp(type->name, t->name) == 0) {
2063 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002064 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002065 type->name);
2066 ret = -1;
2067 goto out;
2068 }
2069 }
2070
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002071 if (!type->set_flag)
2072 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002073 if (!type->flags) {
 2074 /* allocate a dummy tracer_flags */
2075 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002076 if (!type->flags) {
2077 ret = -ENOMEM;
2078 goto out;
2079 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002080 type->flags->val = 0;
2081 type->flags->opts = dummy_tracer_opt;
2082 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002083 if (!type->flags->opts)
2084 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002085
Chunyu Hud39cdd22016-03-08 21:37:01 +08002086 /* store the tracer for __set_tracer_option */
2087 type->flags->trace = type;
2088
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002089 ret = run_tracer_selftest(type);
2090 if (ret < 0)
2091 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002092
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002093 type->next = trace_types;
2094 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002095 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002096
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002097 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002098 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002099 mutex_unlock(&trace_types_lock);
2100
Steven Rostedtdac74942009-02-05 01:13:38 -05002101 if (ret || !default_bootup_tracer)
2102 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002103
Li Zefanee6c2c12009-09-18 14:06:47 +08002104 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002105 goto out_unlock;
2106
2107 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2108 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002109 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002110 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002111
2112 apply_trace_boot_options();
2113
Steven Rostedtdac74942009-02-05 01:13:38 -05002114 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05002115 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05002116#ifdef CONFIG_FTRACE_STARTUP_TEST
2117 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
2118 type->name);
2119#endif
2120
2121 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002122 return ret;
2123}
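/*
 * Example sketch of a minimal tracer plugin registered through
 * register_tracer(); "mytracer" and the callbacks are illustrative.
 * Since register_tracer() is __init, this must run during boot,
 * e.g. from an initcall.
 */
static int my_tracer_init(struct trace_array *tr)
{
	return 0;	/* arm whatever instrumentation the tracer needs */
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* disarm the instrumentation */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "mytracer",
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
};

static __init int my_tracer_register(void)
{
	return register_tracer(&my_tracer);
}
core_initcall(my_tracer_register);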
2124
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002125static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002126{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002127 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002128
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002129 if (!buffer)
2130 return;
2131
Steven Rostedtf6339032009-09-04 12:35:16 -04002132 ring_buffer_record_disable(buffer);
2133
2134 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002135 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002136 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002137
2138 ring_buffer_record_enable(buffer);
2139}
2140
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002141void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002142{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002143 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002144
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002145 if (!buffer)
2146 return;
2147
Steven Rostedt621968c2009-09-04 12:02:35 -04002148 ring_buffer_record_disable(buffer);
2149
2150 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002151 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002152
Alexander Z Lam94571582013-08-02 18:36:16 -07002153 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002154
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002155 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002156
2157 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002158}
2159
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002160/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002161void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002162{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002163 struct trace_array *tr;
2164
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002165 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002166 if (!tr->clear_trace)
2167 continue;
2168 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002169 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002170#ifdef CONFIG_TRACER_MAX_TRACE
2171 tracing_reset_online_cpus(&tr->max_buffer);
2172#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002173 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002174}
2175
Joel Fernandesd914ba32017-06-26 19:01:55 -07002176static int *tgid_map;
2177
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002178#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002179#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002180static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002181struct saved_cmdlines_buffer {
2182 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2183 unsigned *map_cmdline_to_pid;
2184 unsigned cmdline_num;
2185 int cmdline_idx;
2186 char *saved_cmdlines;
2187};
2188static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002189
Steven Rostedt25b0b442008-05-12 21:21:00 +02002190/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002191static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002192
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002193static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002194{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002195 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2196}
2197
2198static inline void set_cmdline(int idx, const char *cmdline)
2199{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002200 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002201}
2202
2203static int allocate_cmdlines_buffer(unsigned int val,
2204 struct saved_cmdlines_buffer *s)
2205{
Kees Cook6da2ec52018-06-12 13:55:00 -07002206 s->map_cmdline_to_pid = kmalloc_array(val,
2207 sizeof(*s->map_cmdline_to_pid),
2208 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002209 if (!s->map_cmdline_to_pid)
2210 return -ENOMEM;
2211
Kees Cook6da2ec52018-06-12 13:55:00 -07002212 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002213 if (!s->saved_cmdlines) {
2214 kfree(s->map_cmdline_to_pid);
2215 return -ENOMEM;
2216 }
2217
2218 s->cmdline_idx = 0;
2219 s->cmdline_num = val;
2220 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2221 sizeof(s->map_pid_to_cmdline));
2222 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2223 val * sizeof(*s->map_cmdline_to_pid));
2224
2225 return 0;
2226}
2227
2228static int trace_create_savedcmd(void)
2229{
2230 int ret;
2231
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002232 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002233 if (!savedcmd)
2234 return -ENOMEM;
2235
2236 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2237 if (ret < 0) {
2238 kfree(savedcmd);
2239 savedcmd = NULL;
2240 return -ENOMEM;
2241 }
2242
2243 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244}
2245
Carsten Emdeb5130b12009-09-13 01:43:07 +02002246int is_tracing_stopped(void)
2247{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002248 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002249}
2250
Steven Rostedt0f048702008-11-05 16:05:44 -05002251/**
2252 * tracing_start - quick start of the tracer
2253 *
2254 * If tracing is enabled but was stopped by tracing_stop,
2255 * this will start the tracer back up.
2256 */
2257void tracing_start(void)
2258{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002259 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002260 unsigned long flags;
2261
2262 if (tracing_disabled)
2263 return;
2264
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002265 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2266 if (--global_trace.stop_count) {
2267 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002268 /* Someone screwed up their debugging */
2269 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002270 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002271 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002272 goto out;
2273 }
2274
Steven Rostedta2f80712010-03-12 19:56:00 -05002275 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002276 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002277
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002278 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002279 if (buffer)
2280 ring_buffer_record_enable(buffer);
2281
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002282#ifdef CONFIG_TRACER_MAX_TRACE
2283 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002284 if (buffer)
2285 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002286#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002287
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002288 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002289
Steven Rostedt0f048702008-11-05 16:05:44 -05002290 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002291 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2292}
2293
2294static void tracing_start_tr(struct trace_array *tr)
2295{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002296 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002297 unsigned long flags;
2298
2299 if (tracing_disabled)
2300 return;
2301
2302 /* If global, we need to also start the max tracer */
2303 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2304 return tracing_start();
2305
2306 raw_spin_lock_irqsave(&tr->start_lock, flags);
2307
2308 if (--tr->stop_count) {
2309 if (tr->stop_count < 0) {
2310 /* Someone screwed up their debugging */
2311 WARN_ON_ONCE(1);
2312 tr->stop_count = 0;
2313 }
2314 goto out;
2315 }
2316
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002317 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002318 if (buffer)
2319 ring_buffer_record_enable(buffer);
2320
2321 out:
2322 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002323}
2324
2325/**
2326 * tracing_stop - quick stop of the tracer
2327 *
2328 * Light weight way to stop tracing. Use in conjunction with
2329 * tracing_start.
2330 */
2331void tracing_stop(void)
2332{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002333 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002334 unsigned long flags;
2335
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002336 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2337 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002338 goto out;
2339
Steven Rostedta2f80712010-03-12 19:56:00 -05002340 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002341 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002342
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002343 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002344 if (buffer)
2345 ring_buffer_record_disable(buffer);
2346
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002347#ifdef CONFIG_TRACER_MAX_TRACE
2348 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002349 if (buffer)
2350 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002351#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002352
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002353 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002354
Steven Rostedt0f048702008-11-05 16:05:44 -05002355 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002356 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2357}
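/*
 * Example sketch: pausing recording around an uninteresting section.
 * The stop/start pair nests via stop_count, so this is safe even if
 * tracing was already stopped elsewhere; my_quiet_section() is
 * illustrative.
 */
static void my_quiet_section(void)
{
	tracing_stop();
	/* ... work whose events should not land in the buffers ... */
	tracing_start();
}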
2358
2359static void tracing_stop_tr(struct trace_array *tr)
2360{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002361 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002362 unsigned long flags;
2363
2364 /* If global, we need to also stop the max tracer */
2365 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2366 return tracing_stop();
2367
2368 raw_spin_lock_irqsave(&tr->start_lock, flags);
2369 if (tr->stop_count++)
2370 goto out;
2371
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002372 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002373 if (buffer)
2374 ring_buffer_record_disable(buffer);
2375
2376 out:
2377 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002378}
2379
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002380static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002381{
Carsten Emdea635cf02009-03-18 09:00:41 +01002382 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002383
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002384 /* treat recording of idle task as a success */
2385 if (!tsk->pid)
2386 return 1;
2387
2388 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002389 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390
2391 /*
2392 * It's not the end of the world if we don't get
2393 * the lock, but we also don't want to spin
2394 * nor do we want to disable interrupts,
2395 * so if we miss here, then better luck next time.
2396 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002397 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002398 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002399
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002400 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002401 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002402 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403
Carsten Emdea635cf02009-03-18 09:00:41 +01002404 /*
2405 * Check whether the cmdline buffer at idx has a pid
2406 * mapped. We are going to overwrite that entry so we
2407 * need to clear the map_pid_to_cmdline. Otherwise we
2408 * would read the new comm for the old pid.
2409 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002410 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002411 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002412 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002414 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2415 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002416
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002417 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 }
2419
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002420 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002422 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002423
2424 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425}
2426
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002427static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429 unsigned map;
2430
Steven Rostedt4ca530852009-03-16 19:20:15 -04002431 if (!pid) {
2432 strcpy(comm, "<idle>");
2433 return;
2434 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435
Steven Rostedt74bf4072010-01-25 15:11:53 -05002436 if (WARN_ON_ONCE(pid < 0)) {
2437 strcpy(comm, "<XXX>");
2438 return;
2439 }
2440
Steven Rostedt4ca530852009-03-16 19:20:15 -04002441 if (pid > PID_MAX_DEFAULT) {
2442 strcpy(comm, "<...>");
2443 return;
2444 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002445
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002446 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002447 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302448 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002449 else
2450 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002451}
2452
2453void trace_find_cmdline(int pid, char comm[])
2454{
2455 preempt_disable();
2456 arch_spin_lock(&trace_cmdline_lock);
2457
2458 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002460 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002461 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002462}
2463
Joel Fernandesd914ba32017-06-26 19:01:55 -07002464int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002466 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2467 return 0;
2468
2469 return tgid_map[pid];
2470}
2471
2472static int trace_save_tgid(struct task_struct *tsk)
2473{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002474 /* treat recording of idle task as a success */
2475 if (!tsk->pid)
2476 return 1;
2477
2478 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002479 return 0;
2480
2481 tgid_map[tsk->pid] = tsk->tgid;
2482 return 1;
2483}
2484
2485static bool tracing_record_taskinfo_skip(int flags)
2486{
2487 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2488 return true;
2489 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2490 return true;
2491 if (!__this_cpu_read(trace_taskinfo_save))
2492 return true;
2493 return false;
2494}
2495
2496/**
2497 * tracing_record_taskinfo - record the task info of a task
2498 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002499 * @task: task to record
2500 * @flags: TRACE_RECORD_CMDLINE for recording comm
2501 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002502 */
2503void tracing_record_taskinfo(struct task_struct *task, int flags)
2504{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002505 bool done;
2506
Joel Fernandesd914ba32017-06-26 19:01:55 -07002507 if (tracing_record_taskinfo_skip(flags))
2508 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002509
2510 /*
2511 * Record as much task information as possible. If some fail, continue
2512 * to try to record the others.
2513 */
2514 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2515 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2516
2517 /* If recording any information failed, retry again soon. */
2518 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519 return;
2520
Joel Fernandesd914ba32017-06-26 19:01:55 -07002521 __this_cpu_write(trace_taskinfo_save, false);
2522}
2523
2524/**
2525 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2526 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002527 * @prev: previous task during sched_switch
2528 * @next: next task during sched_switch
2529 * @flags: TRACE_RECORD_CMDLINE for recording comm
2530 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002531 */
2532void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2533 struct task_struct *next, int flags)
2534{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002535 bool done;
2536
Joel Fernandesd914ba32017-06-26 19:01:55 -07002537 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002538 return;
2539
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002540 /*
2541 * Record as much task information as possible. If some fail, continue
2542 * to try to record the others.
2543 */
2544 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2545 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2546 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2547 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002548
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002549 /* If recording any information failed, retry again soon. */
2550 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002551 return;
2552
2553 __this_cpu_write(trace_taskinfo_save, false);
2554}
2555
2556/* Helpers to record a specific task information */
2557void tracing_record_cmdline(struct task_struct *task)
2558{
2559 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2560}
2561
2562void tracing_record_tgid(struct task_struct *task)
2563{
2564 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002565}
2566
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002567/*
2568 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2569 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2570 * simplifies those functions and keeps them in sync.
2571 */
2572enum print_line_t trace_handle_return(struct trace_seq *s)
2573{
2574 return trace_seq_has_overflowed(s) ?
2575 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2576}
2577EXPORT_SYMBOL_GPL(trace_handle_return);
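/*
 * Example sketch: a trace_event output callback using the helper
 * above instead of open-coding the overflow check; my_event_trace()
 * and its format string are illustrative.
 */
static enum print_line_t my_event_trace(struct trace_iterator *iter,
					int flags,
					struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "my_event: cpu=%d\n", iter->cpu);
	return trace_handle_return(&iter->seq);
}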
2578
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002579void
Cong Wang46710f32019-05-25 09:57:59 -07002580tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2581 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582{
2583 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584
Steven Rostedt777e2082008-09-29 23:02:42 -04002585 entry->preempt_count = pc & 0xff;
2586 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002587 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002588 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002589#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002590 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002591#else
2592 TRACE_FLAG_IRQS_NOSUPPORT |
2593#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002594 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002595 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302596 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002597 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2598 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002600EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002601
Steven Rostedte77405a2009-09-02 14:17:06 -04002602struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002603trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002604 int type,
2605 unsigned long len,
2606 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002607{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002608 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002609}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002610
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002611DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2612DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2613static int trace_buffered_event_ref;
2614
2615/**
2616 * trace_buffered_event_enable - enable buffering events
2617 *
2618 * When events are being filtered, it is quicker to use a temporary
2619 * buffer to write the event data into if there's a likely chance
2620 * that it will not be committed. Discarding an event from the ring
2621 * buffer is not as fast as committing one, and is much slower than
2622 * copying from a temporary buffer on a commit.
2623 *
2624 * When an event is to be filtered, allocate per cpu buffers to
2625 * write the event data into; if the event is filtered and discarded,
2626 * it is simply dropped, otherwise the entire data is committed
2627 * in one shot.
2628 */
2629void trace_buffered_event_enable(void)
2630{
2631 struct ring_buffer_event *event;
2632 struct page *page;
2633 int cpu;
2634
2635 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2636
2637 if (trace_buffered_event_ref++)
2638 return;
2639
2640 for_each_tracing_cpu(cpu) {
2641 page = alloc_pages_node(cpu_to_node(cpu),
2642 GFP_KERNEL | __GFP_NORETRY, 0);
2643 if (!page)
2644 goto failed;
2645
2646 event = page_address(page);
2647 memset(event, 0, sizeof(*event));
2648
2649 per_cpu(trace_buffered_event, cpu) = event;
2650
2651 preempt_disable();
2652 if (cpu == smp_processor_id() &&
Xianting Tianb427e762020-08-13 19:28:03 +08002653 __this_cpu_read(trace_buffered_event) !=
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002654 per_cpu(trace_buffered_event, cpu))
2655 WARN_ON_ONCE(1);
2656 preempt_enable();
2657 }
2658
2659 return;
2660 failed:
2661 trace_buffered_event_disable();
2662}
2663
2664static void enable_trace_buffered_event(void *data)
2665{
2666 /* Probably not needed, but do it anyway */
2667 smp_rmb();
2668 this_cpu_dec(trace_buffered_event_cnt);
2669}
2670
2671static void disable_trace_buffered_event(void *data)
2672{
2673 this_cpu_inc(trace_buffered_event_cnt);
2674}
2675
2676/**
2677 * trace_buffered_event_disable - disable buffering events
2678 *
2679 * When a filter is removed, it is faster to not use the buffered
2680 * events, and to commit directly into the ring buffer. Free up
2681 * the temp buffers when there are no more users. This requires
2682 * special synchronization with current events.
2683 */
2684void trace_buffered_event_disable(void)
2685{
2686 int cpu;
2687
2688 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2689
2690 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2691 return;
2692
2693 if (--trace_buffered_event_ref)
2694 return;
2695
2696 preempt_disable();
2697 /* For each CPU, set the buffer as used. */
2698 smp_call_function_many(tracing_buffer_mask,
2699 disable_trace_buffered_event, NULL, 1);
2700 preempt_enable();
2701
2702 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002703 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002704
2705 for_each_tracing_cpu(cpu) {
2706 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2707 per_cpu(trace_buffered_event, cpu) = NULL;
2708 }
2709 /*
2710 * Make sure trace_buffered_event is NULL before clearing
2711 * trace_buffered_event_cnt.
2712 */
2713 smp_wmb();
2714
2715 preempt_disable();
2716 /* Do the work on each cpu */
2717 smp_call_function_many(tracing_buffer_mask,
2718 enable_trace_buffered_event, NULL, 1);
2719 preempt_enable();
2720}
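
/*
 * Sketch of the intended enable/disable pairing (an assumption about
 * the callers, which live in the event filter code): filter setup
 * enables buffering and filter teardown disables it, both under
 * event_mutex, with the per-cpu pages freed on the last user.
 */
static __maybe_unused void example_filter_toggle(bool enable)
{
	mutex_lock(&event_mutex);
	if (enable)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}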
2721
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002722static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002723
Steven Rostedtef5580d2009-02-27 19:38:04 -05002724struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002725trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002726 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002727 int type, unsigned long len,
2728 unsigned long flags, int pc)
2729{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002730 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002731 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002732
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002733 *current_rb = trace_file->tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002734
Tom Zanussi00b41452018-01-15 20:51:39 -06002735 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002736 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2737 (entry = this_cpu_read(trace_buffered_event))) {
2738 /* Try to use the per cpu buffer first */
2739 val = this_cpu_inc_return(trace_buffered_event_cnt);
2740 if (val == 1) {
2741 trace_event_setup(entry, type, flags, pc);
2742 entry->array[0] = len;
2743 return entry;
2744 }
2745 this_cpu_dec(trace_buffered_event_cnt);
2746 }
2747
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002748 entry = __trace_buffer_lock_reserve(*current_rb,
2749 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002750 /*
2751 * If tracing is off, but we have triggers enabled,
2752 * we still need to look at the event data. Use the temp_buffer
Qiujun Huang906695e2020-10-31 16:57:14 +08002753 * to store the trace event for the trigger to use. It's recursion
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002754 * safe and will not be recorded anywhere.
2755 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002756 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002757 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002758 entry = __trace_buffer_lock_reserve(*current_rb,
2759 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002760 }
2761 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002762}
2763EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2764
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002765static DEFINE_SPINLOCK(tracepoint_iter_lock);
2766static DEFINE_MUTEX(tracepoint_printk_mutex);
2767
2768static void output_printk(struct trace_event_buffer *fbuffer)
2769{
2770 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002771 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002772 struct trace_event *event;
2773 unsigned long flags;
2774 struct trace_iterator *iter = tracepoint_print_iter;
2775
2776 /* We should never get here if iter is NULL */
2777 if (WARN_ON_ONCE(!iter))
2778 return;
2779
2780 event_call = fbuffer->trace_file->event_call;
2781 if (!event_call || !event_call->event.funcs ||
2782 !event_call->event.funcs->trace)
2783 return;
2784
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002785 file = fbuffer->trace_file;
2786 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2787 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2788 !filter_match_preds(file->filter, fbuffer->entry)))
2789 return;
2790
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002791 event = &fbuffer->trace_file->event_call->event;
2792
2793 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2794 trace_seq_init(&iter->seq);
2795 iter->ent = fbuffer->entry;
2796 event_call->event.funcs->trace(iter, 0, event);
2797 trace_seq_putc(&iter->seq, 0);
2798 printk("%s", iter->seq.buffer);
2799
2800 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2801}
2802
2803int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002804 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002805 loff_t *ppos)
2806{
2807 int save_tracepoint_printk;
2808 int ret;
2809
2810 mutex_lock(&tracepoint_printk_mutex);
2811 save_tracepoint_printk = tracepoint_printk;
2812
2813 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2814
2815 /*
2816 * This will force exiting early, as tracepoint_printk
2817 * is always zero when tracepoint_print_iter is not allocated.
2818 */
2819 if (!tracepoint_print_iter)
2820 tracepoint_printk = 0;
2821
2822 if (save_tracepoint_printk == tracepoint_printk)
2823 goto out;
2824
2825 if (tracepoint_printk)
2826 static_key_enable(&tracepoint_printk_key.key);
2827 else
2828 static_key_disable(&tracepoint_printk_key.key);
2829
2830 out:
2831 mutex_unlock(&tracepoint_printk_mutex);
2832
2833 return ret;
2834}
2835
2836void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2837{
2838 if (static_key_false(&tracepoint_printk_key.key))
2839 output_printk(fbuffer);
2840
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002841 if (static_branch_unlikely(&trace_event_exports_enabled))
2842 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002843 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002844 fbuffer->event, fbuffer->entry,
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002845 fbuffer->flags, fbuffer->pc, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002846}
2847EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
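
/*
 * Sketch of the reserve/fill/commit sequence that generated
 * trace_event_raw_event_*() probes follow; the entry layout here is
 * hypothetical, and trace_event_buffer_reserve() is defined in
 * trace_events.c.
 */
struct example_entry {
	struct trace_entry	ent;
	unsigned long		val;
};

static __maybe_unused void
example_event_probe(struct trace_event_file *trace_file, unsigned long val)
{
	struct trace_event_buffer fbuffer;
	struct example_entry *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;

	entry->val = val;
	/* Prints/exports if enabled, then commits to the ring buffer */
	trace_event_buffer_commit(&fbuffer);
}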
2848
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002849/*
2850 * Skip 3:
2851 *
2852 * trace_buffer_unlock_commit_regs()
2853 * trace_event_buffer_commit()
2854 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302855 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002856# define STACK_SKIP 3
2857
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002858void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002859 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002860 struct ring_buffer_event *event,
2861 unsigned long flags, int pc,
2862 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002863{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002864 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002865
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002866 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002867 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002868 * Note, we can still get here via blktrace, wakeup tracer
2869 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002870 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002871 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002872 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002873 ftrace_trace_userstack(buffer, flags, pc);
2874}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002875
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002876/*
2877 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2878 */
2879void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002880trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002881 struct ring_buffer_event *event)
2882{
2883 __buffer_unlock_commit(buffer, event);
2884}
2885
Ingo Molnare309b412008-05-12 21:20:51 +02002886void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002887trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002888 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2889 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002890{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002891 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002892 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002893 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002894 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002895
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002896 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2897 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002898 if (!event)
2899 return;
2900 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002901 entry->ip = ip;
2902 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002903
Chunyan Zhang478409d2016-11-21 15:57:18 +08002904 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002905 if (static_branch_unlikely(&trace_function_exports_enabled))
2906 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002907 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002908 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002909}
2910
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002911#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002912
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002913/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2914#define FTRACE_KSTACK_NESTING 4
2915
2916#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2917
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002918struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002919 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002920};
2921
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002922
2923struct ftrace_stacks {
2924 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2925};
2926
2927static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002928static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2929
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002930static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002931 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002932 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002933{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002934 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002935 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002936 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002937 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002938 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002939 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002940
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002941 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002942 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002943 * If regs is set, then these functions will not be in the way.
2944 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002945#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002946 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002947 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002948#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002949
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002950 preempt_disable_notrace();
2951
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002952 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2953
2954 /* This should never happen. If it does, yell once and skip */
Qiujun Huang906695e2020-10-31 16:57:14 +08002955 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002956 goto out;
2957
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002958 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002959 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2960 * interrupt will either see the value pre increment or post
2961 * increment. If the interrupt happens pre increment it will have
2962 * restored the counter when it returns. We just need a barrier to
2963 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002964 */
2965 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002966
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002967 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002968 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002969
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002970 if (regs) {
2971 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2972 size, skip);
2973 } else {
2974 nr_entries = stack_trace_save(fstack->calls, size, skip);
2975 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002976
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002977 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002978 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2979 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002980 if (!event)
2981 goto out;
2982 entry = ring_buffer_event_data(event);
2983
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002984 memcpy(&entry->caller, fstack->calls, size);
2985 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002986
Tom Zanussif306cc82013-10-24 08:34:17 -05002987 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002988 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002989
2990 out:
2991 /* Again, don't let gcc optimize things here */
2992 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002993 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002994 preempt_enable_notrace();
2995
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002996}
2997
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002998static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002999 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003000 unsigned long flags,
3001 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003002{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003003 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003004 return;
3005
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04003006 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003007}
3008
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003009void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3010 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04003011{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003012 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003013
3014 if (rcu_is_watching()) {
3015 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3016 return;
3017 }
3018
3019 /*
3020 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3021 * but if the above rcu_is_watching() failed, then the NMI
3022 * triggered someplace critical, and rcu_irq_enter() should
3023 * not be called from NMI.
3024 */
3025 if (unlikely(in_nmi()))
3026 return;
3027
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003028 rcu_irq_enter_irqson();
3029 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3030 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003031}
3032
Steven Rostedt03889382009-12-11 09:48:22 -05003033/**
3034 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003035 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003036 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003037void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003038{
3039 unsigned long flags;
3040
3041 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003042 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003043
3044 local_save_flags(flags);
3045
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003046#ifndef CONFIG_UNWINDER_ORC
3047 /* Skip 1 to skip this function. */
3048 skip++;
3049#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003050 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003051 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003052}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003053EXPORT_SYMBOL_GPL(trace_dump_stack);
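
/*
 * Usage sketch: record the current backtrace from a suspect code path.
 * Passing 0 keeps the immediate caller at the top of the trace.
 */
static __maybe_unused void example_report_bad_path(void)
{
	trace_dump_stack(0);
}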
Steven Rostedt03889382009-12-11 09:48:22 -05003054
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003055#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003056static DEFINE_PER_CPU(int, user_stack_count);
3057
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003058static void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003059ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003060{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003061 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003062 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003063 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003064
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003065 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003066 return;
3067
Steven Rostedtb6345872010-03-12 20:03:30 -05003068 /*
3069 * NMIs cannot handle page faults, even with fixups.
3070 * Saving the user stack can (and often does) fault.
3071 */
3072 if (unlikely(in_nmi()))
3073 return;
3074
Steven Rostedt91e86e52010-11-10 12:56:12 +01003075 /*
3076 * Prevent recursion, since user stack tracing may
3077 * trigger other kernel events.
3078 */
3079 preempt_disable();
3080 if (__this_cpu_read(user_stack_count))
3081 goto out;
3082
3083 __this_cpu_inc(user_stack_count);
3084
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003085 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3086 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003087 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003088 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003089 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003090
Steven Rostedt48659d32009-09-11 11:36:23 -04003091 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003092 memset(&entry->caller, 0, sizeof(entry->caller));
3093
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003094 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003095 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003096 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003097
Li Zefan1dbd1952010-12-09 15:47:56 +08003098 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003099 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003100 out:
3101 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003102}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003103#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003104static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003105 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003106{
Török Edwin02b67512008-11-22 13:28:47 +02003107}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003108#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003109
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003110#endif /* CONFIG_STACKTRACE */
3111
Steven Rostedt07d777f2011-09-22 14:01:55 -04003112/* created for use with alloc_percpu */
3113struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003114 int nesting;
3115 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003116};
3117
3118static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003119
3120/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003121 * This allows for lockless recording. If we're nested too deeply, then
3122 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003123 */
3124static char *get_trace_buf(void)
3125{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003126 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003127
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003128 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003129 return NULL;
3130
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003131 buffer->nesting++;
3132
3133 /* Interrupts must see nesting incremented before we use the buffer */
3134 barrier();
Qiujun Huangc1acb4a2020-10-30 00:19:05 +08003135 return &buffer->buffer[buffer->nesting - 1][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003136}
3137
3138static void put_trace_buf(void)
3139{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003140 /* Don't let the decrement of nesting leak before this */
3141 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003142 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003143}
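
/*
 * Sketch of the expected get/put pairing (assumed from the callers
 * below): the buffer is only valid between the two calls, and both
 * must run with preemption disabled so the per-cpu slot stays stable.
 */
static __maybe_unused void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		snprintf(tbuffer, TRACE_BUF_SIZE, "scratch");
		put_trace_buf();
	}
	preempt_enable_notrace();
}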
3144
3145static int alloc_percpu_trace_buffer(void)
3146{
3147 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003148
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003149 if (trace_percpu_buffer)
3150 return 0;
3151
Steven Rostedt07d777f2011-09-22 14:01:55 -04003152 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003153 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003154 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003155
3156 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003157 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003158}
3159
Steven Rostedt81698832012-10-11 10:15:05 -04003160static int buffers_allocated;
3161
Steven Rostedt07d777f2011-09-22 14:01:55 -04003162void trace_printk_init_buffers(void)
3163{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003164 if (buffers_allocated)
3165 return;
3166
3167 if (alloc_percpu_trace_buffer())
3168 return;
3169
Steven Rostedt2184db42014-05-28 13:14:40 -04003170 /* trace_printk() is for debug use only. Don't use it in production. */
3171
Joe Perchesa395d6a2016-03-22 14:28:09 -07003172 pr_warn("\n");
3173 pr_warn("**********************************************************\n");
3174 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3175 pr_warn("** **\n");
3176 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3177 pr_warn("** **\n");
3178 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3179 pr_warn("** unsafe for production use. **\n");
3180 pr_warn("** **\n");
3181 pr_warn("** If you see this message and you are not debugging **\n");
3182 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3183 pr_warn("** **\n");
3184 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3185 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003186
Steven Rostedtb382ede62012-10-10 21:44:34 -04003187 /* Expand the buffers to set size */
3188 tracing_update_buffers();
3189
Steven Rostedt07d777f2011-09-22 14:01:55 -04003190 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003191
3192 /*
3193 * trace_printk_init_buffers() can be called by modules.
3194 * If that happens, then we need to start cmdline recording
3195 * directly here. If global_trace.array_buffer.buffer is
3196 * already allocated here, then this was called by module code.
3197 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003198 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003199 tracing_start_cmdline_record();
3200}
Divya Indif45d1222019-03-20 11:28:51 -07003201EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003202
3203void trace_printk_start_comm(void)
3204{
3205 /* Start tracing comms if trace printk is set */
3206 if (!buffers_allocated)
3207 return;
3208 tracing_start_cmdline_record();
3209}
3210
3211static void trace_printk_start_stop_comm(int enabled)
3212{
3213 if (!buffers_allocated)
3214 return;
3215
3216 if (enabled)
3217 tracing_start_cmdline_record();
3218 else
3219 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003220}
3221
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003222/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003223 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003224 * @ip: The address of the caller
3225 * @fmt: The string format to write to the buffer
3226 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003227 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003228int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003229{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003230 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003231 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003232 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003233 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003234 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003235 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003236 char *tbuffer;
3237 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238
3239 if (unlikely(tracing_selftest_running || tracing_disabled))
3240 return 0;
3241
3242 /* Don't pollute graph traces with trace_vprintk internals */
3243 pause_graph_tracing();
3244
3245 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003246 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003247
Steven Rostedt07d777f2011-09-22 14:01:55 -04003248 tbuffer = get_trace_buf();
3249 if (!tbuffer) {
3250 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003251 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003252 }
3253
3254 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3255
3256 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003257 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003258
Steven Rostedt07d777f2011-09-22 14:01:55 -04003259 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003260 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003261 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003262 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003263 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3264 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003265 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003266 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003267 entry = ring_buffer_event_data(event);
3268 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003269 entry->fmt = fmt;
3270
Steven Rostedt07d777f2011-09-22 14:01:55 -04003271 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003272 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003273 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003274 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003275 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003276
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003277out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003278 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003279out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003280 put_trace_buf();
3281
3282out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003283 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003284 unpause_graph_tracing();
3285
3286 return len;
3287}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003288EXPORT_SYMBOL_GPL(trace_vbprintk);
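
/*
 * Sketch of a varargs wrapper funneling into trace_vbprintk(); the
 * real entry point is trace_printk()'s bprintk machinery. Note that
 * only the fmt pointer is recorded in the entry, so fmt must point to
 * persistent storage.
 */
static __maybe_unused int
example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}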
3289
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003290__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003291static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003292__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003293 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003294{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003295 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003296 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003297 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003298 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003299 unsigned long flags;
3300 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003301
3302 if (tracing_disabled || tracing_selftest_running)
3303 return 0;
3304
Steven Rostedt07d777f2011-09-22 14:01:55 -04003305 /* Don't pollute graph traces with trace_vprintk internals */
3306 pause_graph_tracing();
3307
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003308 pc = preempt_count();
3309 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003310
3312 tbuffer = get_trace_buf();
3313 if (!tbuffer) {
3314 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003315 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003316 }
3317
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003318 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003319
Steven Rostedt07d777f2011-09-22 14:01:55 -04003320 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003321 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003322 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003323 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3324 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003325 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003326 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003327 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003328 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003329
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003330 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003331 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003332 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003333 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003334 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003335
3336out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003337 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003338 put_trace_buf();
3339
3340out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003341 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003342 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003343
3344 return len;
3345}
Steven Rostedt659372d2009-09-03 19:11:07 -04003346
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003347__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003348int trace_array_vprintk(struct trace_array *tr,
3349 unsigned long ip, const char *fmt, va_list args)
3350{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003351 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003352}
3353
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003354/**
3355 * trace_array_printk - Print a message to a specific instance
3356 * @tr: The instance trace_array descriptor
3357 * @ip: The instruction pointer that this is called from.
3358 * @fmt: The format to print (printf format)
3359 *
3360 * If a subsystem sets up its own instance, it has the right to
3361 * printk strings into its tracing instance buffer using this
3362 * function. Note, this function will not write into the top level
3363 * buffer (use trace_printk() for that), as the top level buffer
3364 * should only contain events that can be individually disabled.
3365 * trace_printk() is only used for debugging a kernel, and should
3366 * never be incorporated into normal use.
3367 *
3368 * trace_array_printk() can be used, as it will not add noise to the
3369 * top level tracing buffer.
3370 *
3371 * Note, trace_array_init_printk() must be called on @tr before this
3372 * can be used.
3373 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003374__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003375int trace_array_printk(struct trace_array *tr,
3376 unsigned long ip, const char *fmt, ...)
3377{
3378 int ret;
3379 va_list ap;
3380
Divya Indi953ae452019-08-14 10:55:25 -07003381 if (!tr)
3382 return -ENOENT;
3383
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003384 /* This is only allowed for created instances */
3385 if (tr == &global_trace)
3386 return 0;
3387
3388 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3389 return 0;
3390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003391 va_start(ap, fmt);
3392 ret = trace_array_vprintk(tr, ip, fmt, ap);
3393 va_end(ap);
3394 return ret;
3395}
Divya Indif45d1222019-03-20 11:28:51 -07003396EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003397
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003398/**
3399 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3400 * @tr: The trace array to initialize the buffers for
3401 *
3402 * As trace_array_printk() only writes into instances, such calls are OK
3403 * to have in the kernel (unlike trace_printk()). This needs to be called
3404 * before trace_array_printk() can be used on a trace_array.
3405 */
3406int trace_array_init_printk(struct trace_array *tr)
3407{
3408 if (!tr)
3409 return -ENOENT;
3410
3411 /* This is only allowed for created instances */
3412 if (tr == &global_trace)
3413 return -EINVAL;
3414
3415 return alloc_percpu_trace_buffer();
3416}
3417EXPORT_SYMBOL_GPL(trace_array_init_printk);
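
/*
 * Usage sketch for a subsystem writing into its own instance; the
 * instance name "example" is hypothetical. trace_array_get_by_name()
 * creates the instance if it does not already exist.
 */
static __maybe_unused void example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example");
	if (!tr)
		return;

	if (!trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);

	trace_array_put(tr);
}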
3418
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003419__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003420int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003421 unsigned long ip, const char *fmt, ...)
3422{
3423 int ret;
3424 va_list ap;
3425
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003426 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003427 return 0;
3428
3429 va_start(ap, fmt);
3430 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3431 va_end(ap);
3432 return ret;
3433}
3434
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003435__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003436int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3437{
Steven Rostedta813a152009-10-09 01:41:35 -04003438 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003439}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003440EXPORT_SYMBOL_GPL(trace_vprintk);
3441
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003442static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003443{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003444 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3445
Steven Rostedt5a90f572008-09-03 17:42:51 -04003446 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003447 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003448 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003449}
3450
Ingo Molnare309b412008-05-12 21:20:51 +02003451static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003452peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3453 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003454{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003455 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003456 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003457
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003458 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003459 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003460 if (lost_events)
3461 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3462 (unsigned long)-1 : 0;
3463 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003464 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003465 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003466 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003467
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003468 if (event) {
3469 iter->ent_size = ring_buffer_event_length(event);
3470 return ring_buffer_event_data(event);
3471 }
3472 iter->ent_size = 0;
3473 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003474}
Steven Rostedtd7690412008-10-01 00:29:53 -04003475
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003476static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003477__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3478 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003479{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003480 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003481 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003482 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003483 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003484 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003485 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003486 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487 int cpu;
3488
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003489 /*
3490 * If we are in a per_cpu trace file, don't bother iterating
3491 * over all the cpus; peek at that cpu directly.
3492 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003493 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003494 if (ring_buffer_empty_cpu(buffer, cpu_file))
3495 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003496 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003497 if (ent_cpu)
3498 *ent_cpu = cpu_file;
3499
3500 return ent;
3501 }
3502
Steven Rostedtab464282008-05-12 21:21:00 +02003503 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003504
3505 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003506 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003507
Steven Rostedtbc21b472010-03-31 19:49:26 -04003508 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003509
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003510 /*
3511 * Pick the entry with the smallest timestamp:
3512 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003513 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003514 next = ent;
3515 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003516 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003517 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003518 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003519 }
3520 }
3521
Steven Rostedt12b5da32012-03-27 10:43:28 -04003522 iter->ent_size = next_size;
3523
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003524 if (ent_cpu)
3525 *ent_cpu = next_cpu;
3526
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003527 if (ent_ts)
3528 *ent_ts = next_ts;
3529
Steven Rostedtbc21b472010-03-31 19:49:26 -04003530 if (missing_events)
3531 *missing_events = next_lost;
3532
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 return next;
3534}
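
/*
 * Worked example: if cpu0's next event has ts = 105 and cpu1's has
 * ts = 100, the loop above returns cpu1's entry with *ent_cpu = 1,
 * merging the per-cpu buffers into a single time-ordered stream.
 */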
3535
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003536#define STATIC_TEMP_BUF_SIZE 128
3537static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
3538
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003539/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003540struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3541 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003542{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003543 /* __find_next_entry will reset ent_size */
3544 int ent_size = iter->ent_size;
3545 struct trace_entry *entry;
3546
3547 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003548 * If called from ftrace_dump(), then the iter->temp buffer
3549 * will be the static_temp_buf and not created from kmalloc.
3550 * If the entry size is greater than the buffer, we cannot
3551 * save it. Just return NULL in that case. This is only
3552 * used to add markers when two consecutive events' time
3553 * stamps have a large delta. See trace_print_lat_context().
3554 */
3555 if (iter->temp == static_temp_buf &&
3556 STATIC_TEMP_BUF_SIZE < ent_size)
3557 return NULL;
3558
3559 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003560 * The __find_next_entry() may call peek_next_entry(), which may
3561 * call ring_buffer_peek() that may make the contents of iter->ent
3562 * undefined. Need to copy iter->ent now.
3563 */
3564 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003565 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3566 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003567 void *temp;
3568 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3569 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003570 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003571 kfree(iter->temp);
3572 iter->temp = temp;
3573 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003574 }
3575 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003576 iter->ent = iter->temp;
3577 }
3578 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3579 /* Put back the original ent_size */
3580 iter->ent_size = ent_size;
3581
3582 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003583}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003584
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003585/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003586void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003587{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003588 iter->ent = __find_next_entry(iter, &iter->cpu,
3589 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003590
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003591 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003592 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003593
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003594 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003595}
3596
Ingo Molnare309b412008-05-12 21:20:51 +02003597static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003598{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003599 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003600 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003601}
3602
Ingo Molnare309b412008-05-12 21:20:51 +02003603static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003604{
3605 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003606 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003607 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003608
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003609 WARN_ON_ONCE(iter->leftover);
3610
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003611 (*pos)++;
3612
3613 /* can't go backwards */
3614 if (iter->idx > i)
3615 return NULL;
3616
3617 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003618 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003619 else
3620 ent = iter;
3621
3622 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003623 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003624
3625 iter->pos = *pos;
3626
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003627 return ent;
3628}
3629
Jason Wessel955b61e2010-08-05 09:22:23 -05003630void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003631{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003632 struct ring_buffer_iter *buf_iter;
3633 unsigned long entries = 0;
3634 u64 ts;
3635
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003636 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003637
Steven Rostedt6d158a82012-06-27 20:46:14 -04003638 buf_iter = trace_buffer_iter(iter, cpu);
3639 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003640 return;
3641
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003642 ring_buffer_iter_reset(buf_iter);
3643
3644 /*
3645 * With the max latency tracers, we could have the case
3646 * that a reset never took place on a cpu. This is evident
3647 * from the timestamp being before the start of the buffer.
3648 */
YangHui69243722020-06-16 11:36:46 +08003649 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003650 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003651 break;
3652 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003653 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003654 }
3655
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003656 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003657}
3658
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003659/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003660 * The current tracer is copied to avoid global locking
3661 * all around.
3662 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003663static void *s_start(struct seq_file *m, loff_t *pos)
3664{
3665 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003666 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003667 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003668 void *p = NULL;
3669 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003670 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003671
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003672 /*
3673 * Copy the tracer to avoid using a global lock all around.
3674 * iter->trace is a copy of current_trace; the pointer to the
3675 * name may be used instead of a strcmp(), as iter->trace->name
3676 * will point to the same string as current_trace->name.
3677 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003678 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003679 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3680 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003681 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003682
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003683#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003684 if (iter->snapshot && iter->trace->use_max_tr)
3685 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003686#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003687
3688 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003689 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003690
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003691 if (*pos != iter->pos) {
3692 iter->ent = NULL;
3693 iter->cpu = 0;
3694 iter->idx = -1;
3695
Steven Rostedtae3b5092013-01-23 15:22:59 -05003696 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003697 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003698 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003699 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003700 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003701
Lai Jiangshanac91d852010-03-02 17:54:50 +08003702 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003703 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3704 ;
3705
3706 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003707 /*
3708 * If we overflowed the seq_file before, then we want
3709 * to just reuse the trace_seq buffer again.
3710 */
3711 if (iter->leftover)
3712 p = iter;
3713 else {
3714 l = *pos - 1;
3715 p = s_next(m, p, &l);
3716 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003717 }
3718
Lai Jiangshan4f535962009-05-18 19:35:34 +08003719 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003720 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003721 return p;
3722}
3723
3724static void s_stop(struct seq_file *m, void *p)
3725{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003726 struct trace_iterator *iter = m->private;
3727
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003728#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003729 if (iter->snapshot && iter->trace->use_max_tr)
3730 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003731#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003732
3733 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003734 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003735
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003736 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003737 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003738}
3739
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003740static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003741get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003742 unsigned long *entries, int cpu)
3743{
3744 unsigned long count;
3745
3746 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3747 /*
3748 * If this buffer has skipped entries, then we hold all
3749 * entries for the trace and we need to ignore the
3750 * ones before the time stamp.
3751 */
3752 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3753 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3754 /* total is the same as the entries */
3755 *total = count;
3756 } else
3757 *total = count +
3758 ring_buffer_overrun_cpu(buf->buffer, cpu);
3759 *entries = count;
3760}
3761
3762static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003763get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003764 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003765{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003766 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003767 int cpu;
3768
3769 *total = 0;
3770 *entries = 0;
3771
3772 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003773 get_total_entries_cpu(buf, &t, &e, cpu);
3774 *total += t;
3775 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003776 }
3777}
3778
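/*
 * Public wrappers for the accounting above; a NULL trace_array selects
 * the top-level global_trace.  These are exported for consumers outside
 * this file (the kdb ftdump path, for example) that want an entry count
 * before walking the buffer.
 */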
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003779unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3780{
3781 unsigned long total, entries;
3782
3783 if (!tr)
3784 tr = &global_trace;
3785
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003786 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003787
3788 return entries;
3789}
3790
3791unsigned long trace_total_entries(struct trace_array *tr)
3792{
3793 unsigned long total, entries;
3794
3795 if (!tr)
3796 tr = &global_trace;
3797
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003798 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003799
3800 return entries;
3801}
3802
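/*
 * Header for the latency format: each column of the punctuation
 * "staircase" lines up with the per-event context fields (CPU number,
 * irqs-off, need-resched, hardirq/softirq, preempt-depth), followed by
 * the delay and caller columns.
 */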
Ingo Molnare309b412008-05-12 21:20:51 +02003803static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003804{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003805 seq_puts(m, "#                    _------=> CPU#            \n"
3806     "#                   / _-----=> irqs-off        \n"
3807     "#                  | / _----=> need-resched    \n"
3808     "#                  || / _---=> hardirq/softirq \n"
3809     "#                  ||| / _--=> preempt-depth   \n"
3810     "#                  |||| /     delay            \n"
3811     "#  cmd     pid     ||||| time  |   caller      \n"
3812     "#     \\   /        |||||  \\    |   /         \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003813}
3814
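/*
 * Summary line shared by the default headers: events still in the
 * buffer versus events ever written, plus the number of online CPUs.
 */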
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003815static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003816{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003817 unsigned long total;
3818 unsigned long entries;
3819
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003820 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003821 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3822 entries, total, num_online_cpus());
3823 seq_puts(m, "#\n");
3824}
3825
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003826static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003827 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003828{
Joel Fernandes441dae82017-06-25 22:38:43 -07003829 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3830
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003831 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003832
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003833 seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
3834 seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003835}
3836
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003837static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003838 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003839{
Joel Fernandes441dae82017-06-25 22:38:43 -07003840 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003841 const char *space = "            ";
3842 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003843
Quentin Perret9e738212019-02-14 15:29:50 +00003844 print_event_info(buf, m);
3845
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02003846 seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
3847 seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
3848 seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
3849 seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
3850 seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
3851 seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
3852 seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "       |    ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003853}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003854
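/*
 * Verbose banner printed by the latency tracers: tracer name and kernel
 * release, the recorded latency, the task that triggered it, the
 * preemption model the kernel was built with, and, when a critical
 * section was recorded, where it started and ended.
 */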
Jiri Olsa62b915f2010-04-02 19:01:22 +02003855void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003856print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3857{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003858 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003859 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003860 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003861 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003862 unsigned long entries;
3863 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003864 const char *name = "preemption";
3865
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003866 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003867
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003868 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003869
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003870 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003871 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003872 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003873 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003874 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003875 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003876 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003877 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003878 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003879 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003880#if defined(CONFIG_PREEMPT_NONE)
3881 "server",
3882#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3883 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003884#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003885 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02003886#elif defined(CONFIG_PREEMPT_RT)
3887 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003888#else
3889 "unknown",
3890#endif
3891 /* These are reserved for later use */
3892 0, 0, 0, 0);
3893#ifdef CONFIG_SMP
3894 seq_printf(m, " #P:%d)\n", num_online_cpus());
3895#else
3896 seq_puts(m, ")\n");
3897#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003898 seq_puts(m, "# -----------------\n");
3899 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003900 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003901 data->comm, data->pid,
3902 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003903 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003904 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003905
3906 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003907 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003908 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3909 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003910 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003911 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3912 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003913 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003914 }
3915
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003916 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003917}
3918
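/*
 * With the "annotate" option set, emit a one-time "##### CPU n buffer
 * started ####" marker the first time output includes events from a
 * given CPU, so a reader knows that CPU's buffer begins later than the
 * others rather than assuming events were lost.
 */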
Steven Rostedta3097202008-11-07 22:36:02 -05003919static void test_cpu_buff_start(struct trace_iterator *iter)
3920{
3921 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003922 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003923
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003924 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003925 return;
3926
3927 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3928 return;
3929
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003930 if (cpumask_available(iter->started) &&
3931 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003932 return;
3933
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003934 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003935 return;
3936
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003937 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003938 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003939
3940 /* Don't print started cpu buffer for the first entry of the trace */
3941 if (iter->idx > 1)
3942 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3943 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003944}
3945
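/*
 * The default human-readable formatter.  The raw, hex and binary
 * variants below follow the same pattern: print the context fields if
 * requested, then hand the event to the matching trace_event callback,
 * returning TRACE_TYPE_PARTIAL_LINE if the trace_seq filled up.
 */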
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003946static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003947{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003948 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003949 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003950 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003951 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003952 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003953
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003954 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003955
Steven Rostedta3097202008-11-07 22:36:02 -05003956 test_cpu_buff_start(iter);
3957
Steven Rostedtf633cef2008-12-23 23:24:13 -05003958 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003959
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003960 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003961 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3962 trace_print_lat_context(iter);
3963 else
3964 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003965 }
3966
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003967 if (trace_seq_has_overflowed(s))
3968 return TRACE_TYPE_PARTIAL_LINE;
3969
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003970 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003971 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003972
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003973 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003974
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003975 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003976}
3977
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003978static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003979{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003980 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003981 struct trace_seq *s = &iter->seq;
3982 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003983 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003984
3985 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003986
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003987 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003988 trace_seq_printf(s, "%d %d %llu ",
3989 entry->pid, iter->cpu, iter->ts);
3990
3991 if (trace_seq_has_overflowed(s))
3992 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003993
Steven Rostedtf633cef2008-12-23 23:24:13 -05003994 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003995 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003996 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003997
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003998 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003999
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004000 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004001}
4002
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004003static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004004{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004005 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004006 struct trace_seq *s = &iter->seq;
4007 unsigned char newline = '\n';
4008 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004009 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004010
4011 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004012
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004013 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004014 SEQ_PUT_HEX_FIELD(s, entry->pid);
4015 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4016 SEQ_PUT_HEX_FIELD(s, iter->ts);
4017 if (trace_seq_has_overflowed(s))
4018 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004019 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004020
Steven Rostedtf633cef2008-12-23 23:24:13 -05004021 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004022 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004023 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004024 if (ret != TRACE_TYPE_HANDLED)
4025 return ret;
4026 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004027
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004028 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004029
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004030 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004031}
4032
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004033static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004034{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004035 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004036 struct trace_seq *s = &iter->seq;
4037 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004038 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004039
4040 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004041
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004042 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004043 SEQ_PUT_FIELD(s, entry->pid);
4044 SEQ_PUT_FIELD(s, iter->cpu);
4045 SEQ_PUT_FIELD(s, iter->ts);
4046 if (trace_seq_has_overflowed(s))
4047 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004048 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004049
Steven Rostedtf633cef2008-12-23 23:24:13 -05004050 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004051 return event ? event->funcs->binary(iter, 0, event) :
4052 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004053}
4054
Jiri Olsa62b915f2010-04-02 19:01:22 +02004055int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004056{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004057 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004058 int cpu;
4059
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004060 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004061 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004062 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004063 buf_iter = trace_buffer_iter(iter, cpu);
4064 if (buf_iter) {
4065 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004066 return 0;
4067 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004068 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004069 return 0;
4070 }
4071 return 1;
4072 }
4073
Steven Rostedtab464282008-05-12 21:21:00 +02004074 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004075 buf_iter = trace_buffer_iter(iter, cpu);
4076 if (buf_iter) {
4077 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004078 return 0;
4079 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004080 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004081 return 0;
4082 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004083 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004084
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004085 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004086}
4087
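/*
 * Dispatch one event to the right printer.  Order matters: lost-event
 * banners come first, then a tracer-specific print_line() hook, then
 * the printk-msg-only shortcuts, and finally the bin/hex/raw/default
 * formats selected by the trace options.
 */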
Lai Jiangshan4f535962009-05-18 19:35:34 +08004088/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004089enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004090{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004091 struct trace_array *tr = iter->tr;
4092 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004093 enum print_line_t ret;
4094
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004095 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004096 if (iter->lost_events == (unsigned long)-1)
4097 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4098 iter->cpu);
4099 else
4100 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4101 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004102 if (trace_seq_has_overflowed(&iter->seq))
4103 return TRACE_TYPE_PARTIAL_LINE;
4104 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004105
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004106 if (iter->trace && iter->trace->print_line) {
4107 ret = iter->trace->print_line(iter);
4108 if (ret != TRACE_TYPE_UNHANDLED)
4109 return ret;
4110 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004111
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004112 if (iter->ent->type == TRACE_BPUTS &&
4113 trace_flags & TRACE_ITER_PRINTK &&
4114 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4115 return trace_print_bputs_msg_only(iter);
4116
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004117 if (iter->ent->type == TRACE_BPRINT &&
4118 trace_flags & TRACE_ITER_PRINTK &&
4119 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004120 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004121
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004122 if (iter->ent->type == TRACE_PRINT &&
4123 trace_flags & TRACE_ITER_PRINTK &&
4124 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004125 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004126
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004127 if (trace_flags & TRACE_ITER_BIN)
4128 return print_bin_fmt(iter);
4129
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004130 if (trace_flags & TRACE_ITER_HEX)
4131 return print_hex_fmt(iter);
4132
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004133 if (trace_flags & TRACE_ITER_RAW)
4134 return print_raw_fmt(iter);
4135
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004136 return print_trace_fmt(iter);
4137}
4138
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004139void trace_latency_header(struct seq_file *m)
4140{
4141 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004142 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004143
4144 /* print nothing if the buffers are empty */
4145 if (trace_empty(iter))
4146 return;
4147
4148 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4149 print_trace_header(m, iter);
4150
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004151 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004152 print_lat_help_header(m);
4153}
4154
Jiri Olsa62b915f2010-04-02 19:01:22 +02004155void trace_default_header(struct seq_file *m)
4156{
4157 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004158 struct trace_array *tr = iter->tr;
4159 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004160
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004161 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4162 return;
4163
Jiri Olsa62b915f2010-04-02 19:01:22 +02004164 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4165 /* print nothing if the buffers are empty */
4166 if (trace_empty(iter))
4167 return;
4168 print_trace_header(m, iter);
4169 if (!(trace_flags & TRACE_ITER_VERBOSE))
4170 print_lat_help_header(m);
4171 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004172 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4173 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004174 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004175 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004176 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004177 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004178 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004179 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004180 }
4181}
4182
Steven Rostedte0a413f2011-09-29 21:26:16 -04004183static void test_ftrace_alive(struct seq_file *m)
4184{
4185 if (!ftrace_is_dead())
4186 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004187 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4188 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004189}
4190
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004191#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004192static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004193{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004194 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4195 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4196 "# Takes a snapshot of the main buffer.\n"
4197 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4198 "# (Doesn't have to be '2' works with any number that\n"
4199 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004200}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004201
4202static void show_snapshot_percpu_help(struct seq_file *m)
4203{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004204 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004205#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004206 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4207 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004208#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004209 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4210 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004211#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004212 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4213 "# (Doesn't have to be '2' works with any number that\n"
4214 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004215}
4216
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004217static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4218{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004219 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004220 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004221 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004222 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004223
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004224 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004225 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4226 show_snapshot_main_help(m);
4227 else
4228 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004229}
4230#else
4231/* Should never be called */
4232static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4233#endif
4234
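/*
 * seq_file ->show() callback for the trace file: emits the header block
 * first (tracer name, ftrace health warning, snapshot help or column
 * headers), then one event per call.  A line that did not fit is
 * remembered via iter->leftover and re-emitted on the next call.
 */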
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004235static int s_show(struct seq_file *m, void *v)
4236{
4237 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004238 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004239
4240 if (iter->ent == NULL) {
4241 if (iter->tr) {
4242 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4243 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004244 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004245 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004246 if (iter->snapshot && trace_empty(iter))
4247 print_snapshot_help(m, iter);
4248 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004249 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004250 else
4251 trace_default_header(m);
4252
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004253 } else if (iter->leftover) {
4254 /*
4255 * If we filled the seq_file buffer earlier, we
4256 * want to just show it now.
4257 */
4258 ret = trace_print_seq(m, &iter->seq);
4259
4260 /* ret should this time be zero, but you never know */
4261 iter->leftover = ret;
4262
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004263 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004264 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004265 ret = trace_print_seq(m, &iter->seq);
4266 /*
4267 * If we overflow the seq_file buffer, then it will
4268 * ask us for this data again at start up.
4269 * Use that instead.
4270 * ret is 0 if seq_file write succeeded.
4271 * -1 otherwise.
4272 */
4273 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004274 }
4275
4276 return 0;
4277}
4278
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004279/*
4280 * Should be used after trace_array_get(), trace_types_lock
4281 * ensures that i_cdev was already initialized.
4282 */
4283static inline int tracing_get_cpu(struct inode *inode)
4284{
4285 if (inode->i_cdev) /* See trace_create_cpu_file() */
4286 return (long)inode->i_cdev - 1;
4287 return RING_BUFFER_ALL_CPUS;
4288}
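/*
 * The encoding relied on above: trace_create_cpu_file() stores cpu + 1
 * in i_cdev, so NULL (the top-level files) means "all CPUs" and the
 * subtraction recovers the CPU number for per_cpu files.
 */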
4289
James Morris88e9d342009-09-22 16:43:43 -07004290static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004291 .start = s_start,
4292 .next = s_next,
4293 .stop = s_stop,
4294 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004295};
4296
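/*
 * Core open path for the trace file.  Allocates the iterator and a
 * private copy of the current tracer, prepares a ring-buffer iterator
 * per CPU (or a single one for per-CPU files), and, if the
 * pause-on-trace option is set and this is not the snapshot file,
 * stops tracing while the file is open.
 */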
Ingo Molnare309b412008-05-12 21:20:51 +02004297static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004298__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004299{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004300 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004301 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004302 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004303
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004304 if (tracing_disabled)
4305 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004306
Jiri Olsa50e18b92012-04-25 10:23:39 +02004307 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004308 if (!iter)
4309 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004310
Gil Fruchter72917232015-06-09 10:32:35 +03004311 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004312 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004313 if (!iter->buffer_iter)
4314 goto release;
4315
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004316 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004317 * trace_find_next_entry() may need to save off iter->ent.
4318 * It will place it into the iter->temp buffer. As most
4319 * events are less than 128, allocate a buffer of that size.
4320 * If one is greater, then trace_find_next_entry() will
4321 * allocate a new buffer to adjust for the bigger iter->ent.
4322 * It's not critical if it fails to get allocated here.
4323 */
4324 iter->temp = kmalloc(128, GFP_KERNEL);
4325 if (iter->temp)
4326 iter->temp_size = 128;
4327
4328 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004329 * We make a copy of the current tracer to avoid concurrent
4330 * changes on it while we are reading.
4331 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004332 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004333 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004334 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004335 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004336
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004337 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004338
Li Zefan79f55992009-06-15 14:58:26 +08004339 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004340 goto fail;
4341
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004342 iter->tr = tr;
4343
4344#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004345 /* Currently only the top directory has a snapshot */
4346 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004347 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004348 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004349#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004350 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004351 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004352 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004353 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004354 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004355
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004356 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004357 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004358 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004359
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004360 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004361 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004362 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4363
David Sharp8be07092012-11-13 12:18:22 -08004364 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004365 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004366 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4367
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004368 /*
4369 * If pause-on-trace is enabled, then stop the trace while
4370 * dumping, unless this is the "snapshot" file
4371 */
4372 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004373 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004374
Steven Rostedtae3b5092013-01-23 15:22:59 -05004375 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004376 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004377 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004378 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004379 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004380 }
4381 ring_buffer_read_prepare_sync();
4382 for_each_tracing_cpu(cpu) {
4383 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004384 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004385 }
4386 } else {
4387 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004388 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004389 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004390 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004391 ring_buffer_read_prepare_sync();
4392 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004393 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004394 }
4395
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004396 mutex_unlock(&trace_types_lock);
4397
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004398 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004399
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004400 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004401 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004402 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004403 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004404 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004405release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004406 seq_release_private(inode, file);
4407 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004408}
4409
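/*
 * Open helper for files that need only the security check and the
 * tracing_disabled test; no trace_array reference is taken (note the
 * NULL passed to tracing_check_open_get_tr()).
 */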
4410int tracing_open_generic(struct inode *inode, struct file *filp)
4411{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004412 int ret;
4413
4414 ret = tracing_check_open_get_tr(NULL);
4415 if (ret)
4416 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004417
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004418 filp->private_data = inode->i_private;
4419 return 0;
4420}
4421
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004422bool tracing_is_disabled(void)
4423{
4424 return tracing_disabled;
4425}
4426
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004427/*
4428 * Open and update trace_array ref count.
4429 * Must have the current trace_array passed to it.
4430 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004431int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004432{
4433 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004434 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004435
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004436 ret = tracing_check_open_get_tr(tr);
4437 if (ret)
4438 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004439
4440 filp->private_data = inode->i_private;
4441
4442 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004443}
4444
Hannes Eder4fd27352009-02-10 19:44:12 +01004445static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004446{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004447 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004448 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004449 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004450 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004451
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004452 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004453 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004454 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004455 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004456
Oleg Nesterov6484c712013-07-23 17:26:10 +02004457 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004458 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004459 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004460
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004461 for_each_tracing_cpu(cpu) {
4462 if (iter->buffer_iter[cpu])
4463 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4464 }
4465
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004466 if (iter->trace && iter->trace->close)
4467 iter->trace->close(iter);
4468
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004469 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004470 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004471 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004472
4473 __trace_array_put(tr);
4474
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004475 mutex_unlock(&trace_types_lock);
4476
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004477 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004478 free_cpumask_var(iter->started);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004479 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004480 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004481 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004482 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004483
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004484 return 0;
4485}
4486
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004487static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4488{
4489 struct trace_array *tr = inode->i_private;
4490
4491 trace_array_put(tr);
4492 return 0;
4493}
4494
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004495static int tracing_single_release_tr(struct inode *inode, struct file *file)
4496{
4497 struct trace_array *tr = inode->i_private;
4498
4499 trace_array_put(tr);
4500
4501 return single_release(inode, file);
4502}
4503
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004504static int tracing_open(struct inode *inode, struct file *file)
4505{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004506 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004507 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004508 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004509
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004510 ret = tracing_check_open_get_tr(tr);
4511 if (ret)
4512 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004513
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004514 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004515 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4516 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004517 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004518
4519#ifdef CONFIG_TRACER_MAX_TRACE
4520 if (tr->current_trace->print_max)
4521 trace_buf = &tr->max_buffer;
4522#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004523
4524 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004525 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004526 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004527 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004528 }
4529
4530 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004531 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004532 if (IS_ERR(iter))
4533 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004534 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004535 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4536 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004537
4538 if (ret < 0)
4539 trace_array_put(tr);
4540
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004541 return ret;
4542}
4543
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004544/*
4545 * Some tracers are not suitable for instance buffers.
4546 * A tracer is always available for the global array (toplevel)
4547 * or if it explicitly states that it is.
4548 */
4549static bool
4550trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4551{
4552 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4553}
4554
4555/* Find the next tracer that this trace array may use */
4556static struct tracer *
4557get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4558{
4559 while (t && !trace_ok_for_array(t, tr))
4560 t = t->next;
4561
4562 return t;
4563}
4564
Ingo Molnare309b412008-05-12 21:20:51 +02004565static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004566t_next(struct seq_file *m, void *v, loff_t *pos)
4567{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004568 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004569 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004570
4571 (*pos)++;
4572
4573 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004574 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004575
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004576 return t;
4577}
4578
4579static void *t_start(struct seq_file *m, loff_t *pos)
4580{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004581 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004582 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004583 loff_t l = 0;
4584
4585 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004586
4587 t = get_tracer_for_array(tr, trace_types);
4588 for (; t && l < *pos; t = t_next(m, t, &l))
4589 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004590
4591 return t;
4592}
4593
4594static void t_stop(struct seq_file *m, void *p)
4595{
4596 mutex_unlock(&trace_types_lock);
4597}
4598
4599static int t_show(struct seq_file *m, void *v)
4600{
4601 struct tracer *t = v;
4602
4603 if (!t)
4604 return 0;
4605
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004606 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004607 if (t->next)
4608 seq_putc(m, ' ');
4609 else
4610 seq_putc(m, '\n');
4611
4612 return 0;
4613}
4614
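/*
 * seq_file plumbing for "available_tracers": t_start()/t_next() walk
 * the global trace_types list, skipping tracers this instance may not
 * use, and t_show() prints the names separated by spaces on one line.
 */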
James Morris88e9d342009-09-22 16:43:43 -07004615static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004616 .start = t_start,
4617 .next = t_next,
4618 .stop = t_stop,
4619 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004620};
4621
4622static int show_traces_open(struct inode *inode, struct file *file)
4623{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004624 struct trace_array *tr = inode->i_private;
4625 struct seq_file *m;
4626 int ret;
4627
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004628 ret = tracing_check_open_get_tr(tr);
4629 if (ret)
4630 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004631
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004632 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004633 if (ret) {
4634 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004635 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004636 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004637
4638 m = file->private_data;
4639 m->private = tr;
4640
4641 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004642}
4643
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004644static int show_traces_release(struct inode *inode, struct file *file)
4645{
4646 struct trace_array *tr = inode->i_private;
4647
4648 trace_array_put(tr);
4649 return seq_release(inode, file);
4650}
4651
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004652static ssize_t
4653tracing_write_stub(struct file *filp, const char __user *ubuf,
4654 size_t count, loff_t *ppos)
4655{
4656 return count;
4657}
4658
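/*
 * Write-only opens never attach a seq_file, so seeking them just pins
 * the position at zero; read opens get normal seq_lseek() behaviour.
 */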
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004659loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004660{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004661 int ret;
4662
Slava Pestov364829b2010-11-24 15:13:16 -08004663 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004664 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004665 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004666 file->f_pos = ret = 0;
4667
4668 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004669}
4670
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004671static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004672 .open = tracing_open,
4673 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004674 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004675 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004676 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004677};
4678
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004679static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004680 .open = show_traces_open,
4681 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004682 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004683 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004684};
4685
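/*
 * "tracing_cpumask" reads back as a standard bitmap string ("%*pb",
 * e.g. "ff" for CPUs 0-7), sized by first asking snprintf() for the
 * length it needs.
 */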
4686static ssize_t
4687tracing_cpumask_read(struct file *filp, char __user *ubuf,
4688 size_t count, loff_t *ppos)
4689{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004690 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004691 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004692 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004693
Changbin Du90e406f2017-11-30 11:39:43 +08004694 len = snprintf(NULL, 0, "%*pb\n",
4695 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4696 mask_str = kmalloc(len, GFP_KERNEL);
4697 if (!mask_str)
4698 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004699
Changbin Du90e406f2017-11-30 11:39:43 +08004700 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004701 cpumask_pr_args(tr->tracing_cpumask));
4702 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004703 count = -EINVAL;
4704 goto out_err;
4705 }
Changbin Du90e406f2017-11-30 11:39:43 +08004706 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004707
4708out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004709 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004710
4711 return count;
4712}
4713
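/*
 * Apply a new CPU mask: every CPU flipping state has its per-CPU
 * "disabled" counter adjusted and ring-buffer recording stopped or
 * resumed.  This runs under max_lock with interrupts off so a
 * max-latency buffer swap cannot race with the flip.
 */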
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004714int tracing_set_cpumask(struct trace_array *tr,
4715 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02004716{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004717 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304718
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004719 if (!tr)
4720 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004721
Steven Rostedta5e25882008-12-02 15:34:05 -05004722 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004723 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004724 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004725 /*
4726 * Increase/decrease the disabled counter if we are
4727 * about to flip a bit in the cpumask:
4728 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004729 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304730 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004731 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4732 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004733 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004734 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304735 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004736 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4737 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004738 }
4739 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004740 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004741 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004742
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004743 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004744
4745 return 0;
4746}
4747
4748static ssize_t
4749tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4750 size_t count, loff_t *ppos)
4751{
4752 struct trace_array *tr = file_inode(filp)->i_private;
4753 cpumask_var_t tracing_cpumask_new;
4754 int err;
4755
4756 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4757 return -ENOMEM;
4758
4759 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4760 if (err)
4761 goto err_free;
4762
4763 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4764 if (err)
4765 goto err_free;
4766
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304767 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004768
Ingo Molnarc7078de2008-05-12 21:20:52 +02004769 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004770
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004771err_free:
Li Zefan215368e2009-06-15 10:56:42 +08004772 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004773
4774 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004775}
4776
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004777static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004778 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004779 .read = tracing_cpumask_read,
4780 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004781 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004782 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004783};
4784
Li Zefanfdb372e2009-12-08 11:15:59 +08004785static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004786{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004787 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004788 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004789 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004790 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004791
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004792 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004793 tracer_flags = tr->current_trace->flags->val;
4794 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004795
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004796 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004797 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004798 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004799 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004800 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004801 }
4802
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004803 for (i = 0; trace_opts[i].name; i++) {
4804 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004805 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004806 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004807 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004808 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004809 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004810
Li Zefanfdb372e2009-12-08 11:15:59 +08004811 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004812}
4813
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004814static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004815 struct tracer_flags *tracer_flags,
4816 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004817{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004818 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004819 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004820
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004821 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004822 if (ret)
4823 return ret;
4824
4825 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004826 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004827 else
Zhaolei77708412009-08-07 18:53:21 +08004828 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004829 return 0;
4830}
4831
/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

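/*
 * For reference, a minimal sketch (not code from this file) of the
 * producer side that set_tracer_option() acts on: a tracer declares
 * its private options as a tracer_opt array, terminated by an empty
 * entry, hung off a tracer_flags struct.  The option name and bit
 * below are hypothetical:
 *
 *	static struct tracer_opt example_opts[] = {
 *		{ TRACER_OPT(example-verbose, 0x1) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags example_flags = {
 *		.val  = 0,
 *		.opts = example_opts,
 *	};
 *
 * With ".flags = &example_flags" set in the tracer, writing
 * "example-verbose" or "noexample-verbose" to trace_options ends up
 * in the lookup loop above.
 */
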
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	if ((mask == TRACE_ITER_RECORD_TGID) ||
	    (mask == TRACE_ITER_RECORD_CMD))
		lockdep_assert_held(&event_mutex);

	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
					    sizeof(*tgid_map),
					    GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

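/*
 * Example (assuming tracefs is mounted at /sys/kernel/tracing): the
 * TRACE_ITER_RECORD_TGID branch above runs when user space does
 *
 *	# echo record-tgid > trace_options
 *	# echo norecord-tgid > trace_options
 *
 * Enabling allocates tgid_map on demand; the map itself is never
 * freed here, so recorded entries stay readable after disabling.
 */
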
int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could be set, test the specific tracer options */
	if (ret < 0)
		ret = set_tracer_option(tr, cmp, neg);
	else
		ret = set_tracer_flag(tr, 1 << ret, !neg);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

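/*
 * Example: this consumes the comma-separated list given on the kernel
 * command line via the "trace_options=" parameter, e.g.
 *
 *	trace_options=sym-offset,noprint-parent
 *
 * Each element is passed to trace_set_options() exactly as if it had
 * been written to the trace_options file after boot.
 */
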
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  error_log\t- error log for failed commands (that support it)\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
	"       delta:   Delta difference against a buffer-wide timestamp\n"
	"    absolute:   Absolute (standalone) timestamp\n"
	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by prefixing 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo the number of comm-pid entries to store\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name or glob-matching-pattern\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do_trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_DYNAMIC_EVENTS
	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_KPROBE_EVENTS
	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t  accepts: event-definitions (one definition per line)\n"
	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t           s:[synthetic/]<event> <field> [<field>]\n"
#endif
	"\t           -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"  place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
#endif
	"\t     args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#else
	"\t           $stack<index>, $stack, $retval, $comm,\n"
#endif
	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
	"\t           <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t    field: <stype> <name>;\n"
	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
	"\t           [unsigned] char/int/long\n"
#endif
#endif
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t            enable_hist:<system>:<event>\n"
	"\t            disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t    hist (see below)\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	"  hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t    Format: hist:keys=<field1[,field2,...]>\n"
	"\t            [:values=<field1[,field2,...]>]\n"
	"\t            [:sort=<field1[,field2,...]>]\n"
	"\t            [:size=#entries]\n"
	"\t            [:pause][:continue][:clear]\n"
	"\t            [:name=histname1]\n"
	"\t            [:<handler>.<action>]\n"
	"\t            [if <filter>]\n\n"
	"\t    When a matching event is hit, an entry is added to a hash\n"
	"\t    table using the key(s) and value(s) named, and the value of a\n"
	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
	"\t    correspond to fields in the event's format description.  Keys\n"
	"\t    can be any field, or the special string 'stacktrace'.\n"
	"\t    Compound keys consisting of up to two fields can be specified\n"
	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
	"\t    fields.  Sort keys consisting of up to two fields can be\n"
	"\t    specified using the 'sort' keyword.  The sort direction can\n"
	"\t    be modified by appending '.descending' or '.ascending' to a\n"
	"\t    sort field.  The 'size' parameter can be used to specify more\n"
	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
	"\t    its histogram data will be shared with other triggers of the\n"
	"\t    same name, and trigger hits will update this common data.\n\n"
	"\t    Reading the 'hist' file for the event will dump the hash\n"
	"\t    table in its entirety to stdout.  If there are multiple hist\n"
	"\t    triggers attached to an event, there will be a table for each\n"
	"\t    trigger in the output.  The table displayed for a named\n"
	"\t    trigger will be the same as any other instance having the\n"
	"\t    same name.  The default format used to display a given field\n"
	"\t    can be modified by appending any of the following modifiers\n"
	"\t    to the field name, as applicable:\n\n"
	"\t            .hex        display a number as a hex value\n"
	"\t            .sym        display an address as a symbol\n"
	"\t            .sym-offset display an address as a symbol and offset\n"
	"\t            .execname   display a common_pid as a program name\n"
	"\t            .syscall    display a syscall id as a syscall name\n"
	"\t            .log2       display log2 value rather than raw number\n"
	"\t            .usecs      display a common_timestamp in microseconds\n\n"
	"\t    The 'pause' parameter can be used to pause an existing hist\n"
	"\t    trigger or to start a hist trigger but not log any events\n"
	"\t    until told to do so.  'continue' can be used to start or\n"
	"\t    restart a paused hist trigger.\n\n"
	"\t    The 'clear' parameter will clear the contents of a running\n"
	"\t    hist trigger and leave its current paused/active state\n"
	"\t    unchanged.\n\n"
	"\t    The enable_hist and disable_hist triggers can be used to\n"
	"\t    have one event conditionally start and stop another event's\n"
	"\t    already-attached hist trigger.  The syntax is analogous to\n"
	"\t    the enable_event and disable_event triggers.\n\n"
	"\t    Hist trigger handlers and actions are executed whenever a\n"
	"\t    histogram entry is added or updated.  They take the form:\n\n"
	"\t        <handler>.<action>\n\n"
	"\t    The available handlers are:\n\n"
	"\t        onmatch(matching.event)  - invoke on addition or update\n"
	"\t        onmax(var)               - invoke if var exceeds current max\n"
	"\t        onchange(var)            - invoke action if var changes\n\n"
	"\t    The available actions are:\n\n"
	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
	"\t        save(field,...)                      - save current event fields\n"
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t        snapshot()                           - snapshot the trace buffer\n\n"
#endif
#ifdef CONFIG_SYNTH_EVENTS
	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
	"\t  Write into this file to define/undefine new synthetic events.\n"
	"\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
#endif
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

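/*
 * Reading the file wired up above emits one "<pid> <tgid>" pair per
 * line for each entry that trace_find_tgid() reports as recorded
 * (the pids below are made up):
 *
 *	# cat /sys/kernel/tracing/saved_tgids
 *	517 517
 *	998 517
 */
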
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	/* Skip slots that do not map to a recorded pid */
	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

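/*
 * saved_cmdlines_show() prints "<pid> <comm>" per saved entry, so a
 * read of the file looks like this (pids and comms are made up):
 *
 *	# cat /sys/kernel/tracing/saved_cmdlines
 *	1 systemd
 *	517 sshd
 */
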
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	/* Publish the new buffer under the lock, free the old one outside it */
	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

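/*
 * Usage sketch for the two handlers above: a read reports the current
 * capacity, a write resizes it (the write path rejects 0 and anything
 * above PID_MAX_DEFAULT):
 *
 *	# cat /sys/kernel/tracing/saved_cmdlines_size
 *	128
 *	# echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * The "128" is the usual default; the real value comes from the
 * initial allocation done elsewhere in this file.
 */
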
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	(*pos)++;
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

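/*
 * A read of the resulting eval_map file prints one
 * "<name> <value> (<system>)" line per map entry, per eval_map_show()
 * above.  Illustrative output only:
 *
 *	# cat /sys/kernel/tracing/eval_map
 *	HI_SOFTIRQ 0 (irq)
 *	TIMER_SOFTIRQ 1 (irq)
 */
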
static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps list contains the map entries plus a head
	 * and a tail item, where the head holds the module and length of
	 * the array, and the tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

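/*
 * Resulting layout for one module's maps (len == 3 here, purely
 * illustrative):
 *
 *	[ head(mod,3) ][ map 0 ][ map 1 ][ map 2 ][ tail ]
 *
 * The zeroed final item doubles as the tail: its eval_string is NULL,
 * so update_eval_map() treats it as "follow tail.next", which is
 * either NULL (end of the list) or the next module's array.
 */
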
static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}

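/*
 * The cpu argument above selects a single CPU's buffer or, with
 * RING_BUFFER_ALL_CPUS, all of them.  Both paths are reachable from
 * tracefs (sizes in KB, values are examples):
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# echo 1024 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 */
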
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				   unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this CPU is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start at a minimum size.  Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005901int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005902{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005903 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005904#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005905 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005906#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005907 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005908
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005909 mutex_lock(&trace_types_lock);
5910
Steven Rostedt73c51622009-03-11 13:42:01 -04005911 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005912 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005913 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005914 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005915 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005916 ret = 0;
5917 }
5918
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005919 for (t = trace_types; t; t = t->next) {
5920 if (strcmp(t->name, buf) == 0)
5921 break;
5922 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005923 if (!t) {
5924 ret = -EINVAL;
5925 goto out;
5926 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005927 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005928 goto out;
5929
Tom Zanussia35873a2019-02-13 17:42:45 -06005930#ifdef CONFIG_TRACER_SNAPSHOT
5931 if (t->use_max_tr) {
5932 arch_spin_lock(&tr->max_lock);
5933 if (tr->cond_snapshot)
5934 ret = -EBUSY;
5935 arch_spin_unlock(&tr->max_lock);
5936 if (ret)
5937 goto out;
5938 }
5939#endif
	/* Some tracers won't work on the kernel command line */
5941 if (system_state < SYSTEM_RUNNING && t->noboot) {
5942 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5943 t->name);
5944 goto out;
5945 }
5946
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005947 /* Some tracers are only allowed for the top level buffer */
5948 if (!trace_ok_for_array(t, tr)) {
5949 ret = -EINVAL;
5950 goto out;
5951 }
5952
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005953 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04005954 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005955 ret = -EBUSY;
5956 goto out;
5957 }
5958
Steven Rostedt9f029e82008-11-12 15:24:24 -05005959 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005960
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005961 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005962
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005963 if (tr->current_trace->reset)
5964 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005965
Paul E. McKenney74401722018-11-06 18:44:52 -08005966 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005967 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005968
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005969#ifdef CONFIG_TRACER_MAX_TRACE
5970 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005971
5972 if (had_max_tr && !t->use_max_tr) {
5973 /*
5974 * We need to make sure that the update_max_tr sees that
5975 * current_trace changed to nop_trace to keep it from
5976 * swapping the buffers after we resize it.
		 * update_max_tr() is called with interrupts disabled,
		 * so a synchronize_rcu() is sufficient.
5979 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005980 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005981 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005982 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005983#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005984
5985#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005986 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005987 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005988 if (ret < 0)
5989 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005990 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005991#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005992
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005993 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005994 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005995 if (ret)
5996 goto out;
5997 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005998
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005999 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006000 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006001 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006002 out:
6003 mutex_unlock(&trace_types_lock);
6004
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006005 return ret;
6006}
6007
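/*
 * Illustrative sketch (not an in-tree caller): kernel code that needs
 * to switch tracers directly can call tracing_set_tracer() on the
 * top-level trace_array, e.g.:
 *
 *	int err = tracing_set_tracer(&global_trace, "nop");
 *
 *	if (err)
 *		pr_warn("failed to switch tracer: %d\n", err);
 *
 * The function takes trace_types_lock itself, so the caller must not
 * already hold it.
 */
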
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006008static ssize_t
6009tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6010 size_t cnt, loff_t *ppos)
6011{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006012 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006013 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006014 int i;
6015 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006016 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006017
Steven Rostedt60063a62008-10-28 10:44:24 -04006018 ret = cnt;
6019
Li Zefanee6c2c12009-09-18 14:06:47 +08006020 if (cnt > MAX_TRACER_SIZE)
6021 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006022
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006023 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006024 return -EFAULT;
6025
6026 buf[cnt] = 0;
6027
	/* strip trailing whitespace */
6029 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6030 buf[i] = 0;
6031
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006032 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006033 if (err)
6034 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006035
Jiri Olsacf8517c2009-10-23 19:36:16 -04006036 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006037
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006038 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006039}
6040
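/*
 * Usage example (assuming tracefs is mounted at /sys/kernel/tracing;
 * older setups use /sys/kernel/debug/tracing): the write handler above
 * backs the "current_tracer" file, so switching tracers from a shell
 * is simply:
 *
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 *	echo nop > /sys/kernel/tracing/current_tracer
 *
 * The trailing newline from echo is part of the stripped whitespace,
 * and an unknown tracer name makes the write fail with -EINVAL.
 */
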
6041static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006042tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6043 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006044{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006045 char buf[64];
6046 int r;
6047
Steven Rostedtcffae432008-05-12 21:21:00 +02006048 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006049 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006050 if (r > sizeof(buf))
6051 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006052 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006053}
6054
6055static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006056tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6057 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006058{
Hannes Eder5e398412009-02-10 19:44:34 +01006059 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006060 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006061
Peter Huewe22fe9b52011-06-07 21:58:27 +02006062 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6063 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006064 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006065
6066 *ptr = val * 1000;
6067
6068 return cnt;
6069}
6070
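/*
 * Unit convention for the two helpers above: the files speak
 * microseconds while the backing variables store nanoseconds, which is
 * why the write multiplies by 1000 and the read goes through
 * nsecs_to_usecs(). Writing "100" therefore stores 100000 in *ptr.
 */
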
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006071static ssize_t
6072tracing_thresh_read(struct file *filp, char __user *ubuf,
6073 size_t cnt, loff_t *ppos)
6074{
6075 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6076}
6077
6078static ssize_t
6079tracing_thresh_write(struct file *filp, const char __user *ubuf,
6080 size_t cnt, loff_t *ppos)
6081{
6082 struct trace_array *tr = filp->private_data;
6083 int ret;
6084
6085 mutex_lock(&trace_types_lock);
6086 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6087 if (ret < 0)
6088 goto out;
6089
6090 if (tr->current_trace->update_thresh) {
6091 ret = tr->current_trace->update_thresh(tr);
6092 if (ret < 0)
6093 goto out;
6094 }
6095
6096 ret = cnt;
6097out:
6098 mutex_unlock(&trace_types_lock);
6099
6100 return ret;
6101}
6102
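/*
 * Example: tracing_thresh is exposed as the "tracing_thresh" file.
 * With a latency tracer active, only latencies above the threshold
 * (in usecs) are recorded:
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh
 *	echo 0 > /sys/kernel/tracing/tracing_thresh	(disable threshold)
 *
 * The update_thresh callback above lets the current tracer (e.g.
 * hwlat) react to the new value.
 */
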
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006103#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006104
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006105static ssize_t
6106tracing_max_lat_read(struct file *filp, char __user *ubuf,
6107 size_t cnt, loff_t *ppos)
6108{
6109 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6110}
6111
6112static ssize_t
6113tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6114 size_t cnt, loff_t *ppos)
6115{
6116 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6117}
6118
Chen Gange428abb2015-11-10 05:15:15 +08006119#endif
6120
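/*
 * Example: the pair above backs "tracing_max_latency". Reading it
 * reports the maximum latency recorded so far (in usecs); writing a
 * value (typically 0) sets a new starting point so the next maximum
 * can be captured:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_max_latency
 *	cat /sys/kernel/tracing/tracing_max_latency
 */
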
Steven Rostedtb3806b42008-05-12 21:20:46 +02006121static int tracing_open_pipe(struct inode *inode, struct file *filp)
6122{
Oleg Nesterov15544202013-07-23 17:25:57 +02006123 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006124 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006125 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006126
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006127 ret = tracing_check_open_get_tr(tr);
6128 if (ret)
6129 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006130
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006131 mutex_lock(&trace_types_lock);
6132
Steven Rostedtb3806b42008-05-12 21:20:46 +02006133 /* create a buffer to store the information to pass to userspace */
6134 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006135 if (!iter) {
6136 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006137 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006138 goto out;
6139 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006140
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006141 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006142 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006143
6144 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6145 ret = -ENOMEM;
6146 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306147 }
6148
Steven Rostedta3097202008-11-07 22:36:02 -05006149 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306150 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006151
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006152 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006153 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6154
David Sharp8be07092012-11-13 12:18:22 -08006155 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006156 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006157 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6158
Oleg Nesterov15544202013-07-23 17:25:57 +02006159 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006160 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006161 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006162 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006163 filp->private_data = iter;
6164
Steven Rostedt107bad82008-05-12 21:21:01 +02006165 if (iter->trace->pipe_open)
6166 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006167
Arnd Bergmannb4447862010-07-07 23:40:11 +02006168 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006169
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006170 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006171out:
6172 mutex_unlock(&trace_types_lock);
6173 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006174
6175fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006176 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006177 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006178 mutex_unlock(&trace_types_lock);
6179 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006180}
6181
6182static int tracing_release_pipe(struct inode *inode, struct file *file)
6183{
6184 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006185 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006186
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006187 mutex_lock(&trace_types_lock);
6188
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006189 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006190
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006191 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006192 iter->trace->pipe_close(iter);
6193
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006194 mutex_unlock(&trace_types_lock);
6195
Rusty Russell44623442009-01-01 10:12:23 +10306196 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006197 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006198 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006199
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006200 trace_array_put(tr);
6201
Steven Rostedtb3806b42008-05-12 21:20:46 +02006202 return 0;
6203}
6204
Al Viro9dd95742017-07-03 00:42:43 -04006205static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006206trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006207{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006208 struct trace_array *tr = iter->tr;
6209
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006210 /* Iterators are static, they should be filled or empty */
6211 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006212 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006213
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006214 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006215 /*
6216 * Always select as readable when in blocking mode
6217 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006218 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006219 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006220 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006221 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006222}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006223
Al Viro9dd95742017-07-03 00:42:43 -04006224static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006225tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6226{
6227 struct trace_iterator *iter = filp->private_data;
6228
6229 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006230}
6231
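/*
 * Minimal user-space sketch of the poll support above (illustrative
 * only): trace_pipe can be waited on with poll()/select() instead of
 * busy-reading:
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *		... consume n bytes of trace output ...
 *	}
 */
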
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006232/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006233static int tracing_wait_pipe(struct file *filp)
6234{
6235 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006236 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006237
6238 while (trace_empty(iter)) {
6239
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
6243
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006244 /*
		 * We stop blocking only once we have read something and
		 * tracing has been disabled. If tracing is disabled but
		 * nothing has been read yet, we keep blocking. This allows
		 * a user to cat this file, and then enable tracing. But
		 * after we have read something, we give an EOF when tracing
		 * is again disabled.
6250 *
6251 * iter->pos will be 0 if we haven't read anything.
6252 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006253 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006254 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006255
6256 mutex_unlock(&iter->mutex);
6257
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006258 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006259
6260 mutex_lock(&iter->mutex);
6261
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006262 if (ret)
6263 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006264 }
6265
6266 return 1;
6267}
6268
Steven Rostedtb3806b42008-05-12 21:20:46 +02006269/*
6270 * Consumer reader.
6271 */
6272static ssize_t
6273tracing_read_pipe(struct file *filp, char __user *ubuf,
6274 size_t cnt, loff_t *ppos)
6275{
6276 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006277 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006278
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006279 /*
6280 * Avoid more than one consumer on a single file descriptor
6281 * This is just a matter of traces coherency, the ring buffer itself
6282 * is protected.
6283 */
6284 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006285
6286 /* return any leftover data */
6287 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6288 if (sret != -EBUSY)
6289 goto out;
6290
6291 trace_seq_init(&iter->seq);
6292
Steven Rostedt107bad82008-05-12 21:21:01 +02006293 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006294 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6295 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006296 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006297 }
6298
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006299waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006300 sret = tracing_wait_pipe(filp);
6301 if (sret <= 0)
6302 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006303
6304 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006305 if (trace_empty(iter)) {
6306 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006307 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006308 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006309
6310 if (cnt >= PAGE_SIZE)
6311 cnt = PAGE_SIZE - 1;
6312
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006313 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006314 memset(&iter->seq, 0,
6315 sizeof(struct trace_iterator) -
6316 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006317 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006318 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006319 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006320
Lai Jiangshan4f535962009-05-18 19:35:34 +08006321 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006322 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006323 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006324 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006325 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006326
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006327 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006328 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006329 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006330 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006331 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006332 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006333 if (ret != TRACE_TYPE_NO_CONSUME)
6334 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006335
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006336 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006337 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006338
6339 /*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size, so we should have exited via the partial-line check
		 * above; one of the trace_seq_* functions is not being used
		 * properly.
6343 */
6344 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6345 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006346 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006347 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006348 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006349
Steven Rostedtb3806b42008-05-12 21:20:46 +02006350 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006351 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006352 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006353 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006354
6355 /*
	 * If there was nothing to send to the user despite consuming trace
	 * entries, go back and wait for more entries.
6358 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006359 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006360 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006361
Steven Rostedt107bad82008-05-12 21:21:01 +02006362out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006363 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006364
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006365 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006366}
6367
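/*
 * Note on semantics: unlike the "trace" file, "trace_pipe" is a
 * consuming reader; events are removed from the ring buffer as they
 * are read, and a read blocks (unless O_NONBLOCK is set) until data
 * is available:
 *
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * iter->mutex above is what enforces a single consumer per file
 * descriptor.
 */
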
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006368static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6369 unsigned int idx)
6370{
6371 __free_page(spd->pages[idx]);
6372}
6373
Steven Rostedt34cd4992009-02-09 12:06:29 -05006374static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006375tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006376{
6377 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006378 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006379 int ret;
6380
6381 /* Seq buffer is page-sized, exactly what we need. */
6382 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006383 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006384 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006385
6386 if (trace_seq_has_overflowed(&iter->seq)) {
6387 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006388 break;
6389 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006390
6391 /*
6392 * This should not be hit, because it should only
6393 * be set if the iter->seq overflowed. But check it
6394 * anyway to be safe.
6395 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006396 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006397 iter->seq.seq.len = save_len;
6398 break;
6399 }
6400
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006401 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006402 if (rem < count) {
6403 rem = 0;
6404 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006405 break;
6406 }
6407
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006408 if (ret != TRACE_TYPE_NO_CONSUME)
6409 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006410 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006411 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006412 rem = 0;
6413 iter->ent = NULL;
6414 break;
6415 }
6416 }
6417
6418 return rem;
6419}
6420
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006421static ssize_t tracing_splice_read_pipe(struct file *filp,
6422 loff_t *ppos,
6423 struct pipe_inode_info *pipe,
6424 size_t len,
6425 unsigned int flags)
6426{
Jens Axboe35f3d142010-05-20 10:43:18 +02006427 struct page *pages_def[PIPE_DEF_BUFFERS];
6428 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006429 struct trace_iterator *iter = filp->private_data;
6430 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006431 .pages = pages_def,
6432 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006433 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006434 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006435 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006436 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006437 };
6438 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006439 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006440 unsigned int i;
6441
Jens Axboe35f3d142010-05-20 10:43:18 +02006442 if (splice_grow_spd(pipe, &spd))
6443 return -ENOMEM;
6444
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006445 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006446
6447 if (iter->trace->splice_read) {
6448 ret = iter->trace->splice_read(iter, filp,
6449 ppos, pipe, len, flags);
6450 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006451 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006452 }
6453
6454 ret = tracing_wait_pipe(filp);
6455 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006456 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006457
Jason Wessel955b61e2010-08-05 09:22:23 -05006458 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006459 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006460 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006461 }
6462
Lai Jiangshan4f535962009-05-18 19:35:34 +08006463 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006464 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006465
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006466 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006467 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006468 spd.pages[i] = alloc_page(GFP_KERNEL);
6469 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006470 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006471
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006472 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006473
6474 /* Copy the data into the page, so we can start over. */
6475 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006476 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006477 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006478 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006479 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006480 break;
6481 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006482 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006483 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006484
Steven Rostedtf9520752009-03-02 14:04:40 -05006485 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006486 }
6487
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006488 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006489 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006490 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006491
6492 spd.nr_pages = i;
6493
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006494 if (i)
6495 ret = splice_to_pipe(pipe, &spd);
6496 else
6497 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006498out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006499 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006500 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006501
Steven Rostedt34cd4992009-02-09 12:06:29 -05006502out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006503 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006504 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006505}
6506
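/*
 * Illustrative user-space sketch (not a definitive recipe): the
 * splice handler above moves trace data page by page into a pipe
 * without bouncing it through a user buffer:
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	int p[2];
 *
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 65536, 0);
 *	splice(p[0], NULL, out_fd, NULL, 65536, 0);
 *
 * where out_fd is any writable descriptor (a file, a socket, ...).
 */
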
Steven Rostedta98a3c32008-05-12 21:20:59 +02006507static ssize_t
6508tracing_entries_read(struct file *filp, char __user *ubuf,
6509 size_t cnt, loff_t *ppos)
6510{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006511 struct inode *inode = file_inode(filp);
6512 struct trace_array *tr = inode->i_private;
6513 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006514 char buf[64];
6515 int r = 0;
6516 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006517
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006518 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006519
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006520 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006521 int cpu, buf_size_same;
6522 unsigned long size;
6523
6524 size = 0;
6525 buf_size_same = 1;
6526 /* check if all cpu sizes are same */
6527 for_each_tracing_cpu(cpu) {
6528 /* fill in the size from first enabled cpu */
6529 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006530 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6531 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006532 buf_size_same = 0;
6533 break;
6534 }
6535 }
6536
6537 if (buf_size_same) {
6538 if (!ring_buffer_expanded)
6539 r = sprintf(buf, "%lu (expanded: %lu)\n",
6540 size >> 10,
6541 trace_buf_size >> 10);
6542 else
6543 r = sprintf(buf, "%lu\n", size >> 10);
6544 } else
6545 r = sprintf(buf, "X\n");
6546 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006547 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006548
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006549 mutex_unlock(&trace_types_lock);
6550
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006551 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6552 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006553}
6554
6555static ssize_t
6556tracing_entries_write(struct file *filp, const char __user *ubuf,
6557 size_t cnt, loff_t *ppos)
6558{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006559 struct inode *inode = file_inode(filp);
6560 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006561 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006562 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006563
Peter Huewe22fe9b52011-06-07 21:58:27 +02006564 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6565 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006566 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006567
6568 /* must have at least 1 entry */
6569 if (!val)
6570 return -EINVAL;
6571
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006572 /* value is in KB */
6573 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006574 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006575 if (ret < 0)
6576 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006577
Jiri Olsacf8517c2009-10-23 19:36:16 -04006578 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006579
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006580 return cnt;
6581}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006582
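/*
 * Example: these two handlers back "buffer_size_kb". The size is per
 * CPU; the top-level file resizes all CPU buffers at once, while
 * per_cpu/cpuN/buffer_size_kb targets a single CPU:
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	cat /sys/kernel/tracing/buffer_size_kb
 *
 * Reading prints "X" when the per-CPU sizes differ, and appends
 * "(expanded: ...)" while the boot-time minimal buffer has not been
 * expanded yet.
 */
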
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006583static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006584tracing_total_entries_read(struct file *filp, char __user *ubuf,
6585 size_t cnt, loff_t *ppos)
6586{
6587 struct trace_array *tr = filp->private_data;
6588 char buf[64];
6589 int r, cpu;
6590 unsigned long size = 0, expanded_size = 0;
6591
6592 mutex_lock(&trace_types_lock);
6593 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006594 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006595 if (!ring_buffer_expanded)
6596 expanded_size += trace_buf_size >> 10;
6597 }
6598 if (ring_buffer_expanded)
6599 r = sprintf(buf, "%lu\n", size);
6600 else
6601 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6602 mutex_unlock(&trace_types_lock);
6603
6604 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6605}
6606
6607static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006608tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6609 size_t cnt, loff_t *ppos)
6610{
6611 /*
	 * There is no need to read what the user has written; this function
	 * exists only so that an "echo" into the file does not return an error.
6614 */
6615
6616 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006617
6618 return cnt;
6619}
6620
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006621static int
6622tracing_free_buffer_release(struct inode *inode, struct file *filp)
6623{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006624 struct trace_array *tr = inode->i_private;
6625
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006626 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006627 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006628 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006629 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006630 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006631
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006632 trace_array_put(tr);
6633
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006634 return 0;
6635}
6636
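/*
 * Example: "free_buffer" is meant to be held open by a supervising
 * process. Writes are accepted but ignored; what matters is the
 * release: closing the file shrinks the ring buffer to zero (after
 * turning tracing off when the "disable_on_free" option is set):
 *
 *	exec 3> /sys/kernel/tracing/free_buffer
 *	... run the traced workload ...
 *	exec 3>&-	(buffer is freed here)
 */
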
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006637static ssize_t
6638tracing_mark_write(struct file *filp, const char __user *ubuf,
6639 size_t cnt, loff_t *fpos)
6640{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006641 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006642 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006643 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006644 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006645 struct print_entry *entry;
6646 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006647 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006648 int size;
6649 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006650
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006651/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006652#define FAULTED_STR "<faulted>"
6653#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006654
Steven Rostedtc76f0692008-11-07 22:36:02 -05006655 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006656 return -EINVAL;
6657
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006658 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006659 return -EINVAL;
6660
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006661 if (cnt > TRACE_BUF_SIZE)
6662 cnt = TRACE_BUF_SIZE;
6663
Steven Rostedtd696b582011-09-22 11:50:27 -04006664 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006665
Steven Rostedtd696b582011-09-22 11:50:27 -04006666 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006667 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6668
6669 /* If less than "<faulted>", then make sure we can still add that */
6670 if (cnt < FAULTED_SIZE)
6671 size += FAULTED_SIZE - cnt;
6672
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006673 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006674 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6675 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006676 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006677 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006678 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006679
6680 entry = ring_buffer_event_data(event);
6681 entry->ip = _THIS_IP_;
6682
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006683 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6684 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006685 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006686 cnt = FAULTED_SIZE;
6687 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006688 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006689 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006690
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006691 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6692 /* do not add \n before testing triggers, but add \0 */
6693 entry->buf[cnt] = '\0';
6694 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6695 }
6696
Steven Rostedtd696b582011-09-22 11:50:27 -04006697 if (entry->buf[cnt - 1] != '\n') {
6698 entry->buf[cnt] = '\n';
6699 entry->buf[cnt + 1] = '\0';
6700 } else
6701 entry->buf[cnt] = '\0';
6702
Tingwei Zhang458999c2020-10-05 10:13:15 +03006703 if (static_branch_unlikely(&trace_marker_exports_enabled))
6704 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006705 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006706
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006707 if (tt)
6708 event_triggers_post_call(tr->trace_marker_file, tt);
6709
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006710 if (written > 0)
6711 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006712
Steven Rostedtfa32e852016-07-06 15:25:08 -04006713 return written;
6714}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006715
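/*
 * Example: the handler above backs "trace_marker", which lets user
 * space inject annotations into the trace stream:
 *
 *	echo "hit the interesting spot" > /sys/kernel/tracing/trace_marker
 *
 * If the user buffer faults during the atomic copy, the string
 * "<faulted>" is recorded instead and the write returns -EFAULT.
 */
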
Steven Rostedtfa32e852016-07-06 15:25:08 -04006716/* Limit it for now to 3K (including tag) */
6717#define RAW_DATA_MAX_SIZE (1024*3)
6718
6719static ssize_t
6720tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6721 size_t cnt, loff_t *fpos)
6722{
6723 struct trace_array *tr = filp->private_data;
6724 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006725 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006726 struct raw_data_entry *entry;
6727 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006728 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006729 int size;
6730 int len;
6731
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006732#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6733
Steven Rostedtfa32e852016-07-06 15:25:08 -04006734 if (tracing_disabled)
6735 return -EINVAL;
6736
6737 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6738 return -EINVAL;
6739
6740 /* The marker must at least have a tag id */
6741 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6742 return -EINVAL;
6743
6744 if (cnt > TRACE_BUF_SIZE)
6745 cnt = TRACE_BUF_SIZE;
6746
6747 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6748
Steven Rostedtfa32e852016-07-06 15:25:08 -04006749 local_save_flags(irq_flags);
6750 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006751 if (cnt < FAULT_SIZE_ID)
6752 size += FAULT_SIZE_ID - cnt;
6753
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006754 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006755 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6756 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006757 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006758 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006759 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006760
6761 entry = ring_buffer_event_data(event);
6762
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006763 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6764 if (len) {
6765 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006766 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006767 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006768 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006769 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006770
6771 __buffer_unlock_commit(buffer, event);
6772
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006773 if (written > 0)
6774 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006775
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006776 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006777}
6778
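/*
 * Illustrative user-space sketch for "trace_marker_raw" above: the
 * payload must begin with a 4-byte tag id, followed by arbitrary
 * binary data (at most 3K including the tag):
 *
 *	struct {
 *		int id;
 *		char data[32];
 *	} raw = { .id = 42 };
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &raw, sizeof(raw));
 *
 * The entry is stored as-is; decoding the payload is left to the
 * user-space tooling that knows what the tag id means.
 */
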
Li Zefan13f16d22009-12-08 11:16:11 +08006779static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006780{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006781 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006782 int i;
6783
6784 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006785 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006786 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006787 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6788 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006789 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006790
Li Zefan13f16d22009-12-08 11:16:11 +08006791 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006792}
6793
Tom Zanussid71bd342018-01-15 20:52:07 -06006794int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006795{
Zhaolei5079f322009-08-25 16:12:56 +08006796 int i;
6797
Zhaolei5079f322009-08-25 16:12:56 +08006798 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6799 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6800 break;
6801 }
6802 if (i == ARRAY_SIZE(trace_clocks))
6803 return -EINVAL;
6804
Zhaolei5079f322009-08-25 16:12:56 +08006805 mutex_lock(&trace_types_lock);
6806
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006807 tr->clock_id = i;
6808
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006809 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006810
David Sharp60303ed2012-10-11 16:27:52 -07006811 /*
6812 * New clock may not be consistent with the previous clock.
6813 * Reset the buffer so that it doesn't have incomparable timestamps.
6814 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006815 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006816
6817#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006818 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006819 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006820 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006821#endif
David Sharp60303ed2012-10-11 16:27:52 -07006822
Zhaolei5079f322009-08-25 16:12:56 +08006823 mutex_unlock(&trace_types_lock);
6824
Steven Rostedte1e232c2014-02-10 23:38:46 -05006825 return 0;
6826}
6827
6828static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6829 size_t cnt, loff_t *fpos)
6830{
6831 struct seq_file *m = filp->private_data;
6832 struct trace_array *tr = m->private;
6833 char buf[64];
6834 const char *clockstr;
6835 int ret;
6836
6837 if (cnt >= sizeof(buf))
6838 return -EINVAL;
6839
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006840 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006841 return -EFAULT;
6842
6843 buf[cnt] = 0;
6844
6845 clockstr = strstrip(buf);
6846
6847 ret = tracing_set_clock(tr, clockstr);
6848 if (ret)
6849 return ret;
6850
Zhaolei5079f322009-08-25 16:12:56 +08006851 *fpos += cnt;
6852
6853 return cnt;
6854}
6855
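/*
 * Example: "trace_clock" selects the timestamp source. Reading lists
 * the available clocks with the active one in brackets (the exact set
 * varies by configuration):
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * As the comment in tracing_set_clock() notes, switching clocks
 * resets the buffers, since old and new timestamps are not
 * comparable.
 */
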
Li Zefan13f16d22009-12-08 11:16:11 +08006856static int tracing_clock_open(struct inode *inode, struct file *file)
6857{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006858 struct trace_array *tr = inode->i_private;
6859 int ret;
6860
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006861 ret = tracing_check_open_get_tr(tr);
6862 if (ret)
6863 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006864
6865 ret = single_open(file, tracing_clock_show, inode->i_private);
6866 if (ret < 0)
6867 trace_array_put(tr);
6868
6869 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006870}
6871
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006872static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6873{
6874 struct trace_array *tr = m->private;
6875
6876 mutex_lock(&trace_types_lock);
6877
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006878 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006879 seq_puts(m, "delta [absolute]\n");
6880 else
6881 seq_puts(m, "[delta] absolute\n");
6882
6883 mutex_unlock(&trace_types_lock);
6884
6885 return 0;
6886}
6887
6888static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6889{
6890 struct trace_array *tr = inode->i_private;
6891 int ret;
6892
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006893 ret = tracing_check_open_get_tr(tr);
6894 if (ret)
6895 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006896
6897 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6898 if (ret < 0)
6899 trace_array_put(tr);
6900
6901 return ret;
6902}
6903
Tom Zanussi00b41452018-01-15 20:51:39 -06006904int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6905{
6906 int ret = 0;
6907
6908 mutex_lock(&trace_types_lock);
6909
6910 if (abs && tr->time_stamp_abs_ref++)
6911 goto out;
6912
6913 if (!abs) {
6914 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6915 ret = -EINVAL;
6916 goto out;
6917 }
6918
6919 if (--tr->time_stamp_abs_ref)
6920 goto out;
6921 }
6922
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006923 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
Tom Zanussi00b41452018-01-15 20:51:39 -06006924
6925#ifdef CONFIG_TRACER_MAX_TRACE
6926 if (tr->max_buffer.buffer)
6927 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6928#endif
6929 out:
6930 mutex_unlock(&trace_types_lock);
6931
6932 return ret;
6933}
6934
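/*
 * tracing_set_time_stamp_abs() is reference counted so that multiple
 * in-kernel users (e.g. hist triggers needing absolute timestamps)
 * can request absolute mode independently; delta mode is restored
 * only when the last user drops its reference. The current mode is
 * visible through the read-only "timestamp_mode" file:
 *
 *	cat /sys/kernel/tracing/timestamp_mode
 *	delta [absolute]
 */
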
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006935struct ftrace_buffer_info {
6936 struct trace_iterator iter;
6937 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006938 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006939 unsigned int read;
6940};
6941
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006942#ifdef CONFIG_TRACER_SNAPSHOT
6943static int tracing_snapshot_open(struct inode *inode, struct file *file)
6944{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006945 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006946 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006947 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006948 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006949
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006950 ret = tracing_check_open_get_tr(tr);
6951 if (ret)
6952 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006953
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006954 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006955 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006956 if (IS_ERR(iter))
6957 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006958 } else {
6959 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006960 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006961 m = kzalloc(sizeof(*m), GFP_KERNEL);
6962 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006963 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006964 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6965 if (!iter) {
6966 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006967 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006968 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006969 ret = 0;
6970
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006971 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006972 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006973 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006974 m->private = iter;
6975 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006976 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006977out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006978 if (ret < 0)
6979 trace_array_put(tr);
6980
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006981 return ret;
6982}
6983
6984static ssize_t
6985tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6986 loff_t *ppos)
6987{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006988 struct seq_file *m = filp->private_data;
6989 struct trace_iterator *iter = m->private;
6990 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006991 unsigned long val;
6992 int ret;
6993
6994 ret = tracing_update_buffers();
6995 if (ret < 0)
6996 return ret;
6997
6998 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6999 if (ret)
7000 return ret;
7001
7002 mutex_lock(&trace_types_lock);
7003
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007004 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007005 ret = -EBUSY;
7006 goto out;
7007 }
7008
Tom Zanussia35873a2019-02-13 17:42:45 -06007009 arch_spin_lock(&tr->max_lock);
7010 if (tr->cond_snapshot)
7011 ret = -EBUSY;
7012 arch_spin_unlock(&tr->max_lock);
7013 if (ret)
7014 goto out;
7015
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007016 switch (val) {
7017 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007018 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7019 ret = -EINVAL;
7020 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007021 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007022 if (tr->allocated_snapshot)
7023 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007024 break;
7025 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007026/* Only allow per-cpu swap if the ring buffer supports it */
7027#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7028 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7029 ret = -EINVAL;
7030 break;
7031 }
7032#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007033 if (tr->allocated_snapshot)
7034 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007035 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007036 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007037 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007038 if (ret < 0)
7039 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007040 local_irq_disable();
7041 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007042 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007043 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007044 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007045 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007046 local_irq_enable();
7047 break;
7048 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007049 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007050 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7051 tracing_reset_online_cpus(&tr->max_buffer);
7052 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007053 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007054 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007055 break;
7056 }
7057
7058 if (ret >= 0) {
7059 *ppos += cnt;
7060 ret = cnt;
7061 }
7062out:
7063 mutex_unlock(&trace_types_lock);
7064 return ret;
7065}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007066
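/*
 * Example: with CONFIG_TRACER_SNAPSHOT, the "snapshot" file accepts
 * the values handled by the switch above:
 *
 *	echo 1 > /sys/kernel/tracing/snapshot	(allocate if needed, swap)
 *	cat /sys/kernel/tracing/snapshot	(read the frozen copy)
 *	echo 2 > /sys/kernel/tracing/snapshot	(clear without swapping)
 *	echo 0 > /sys/kernel/tracing/snapshot	(free the snapshot buffer)
 *
 * The write fails with -EBUSY while the current tracer itself uses
 * the snapshot buffer or a conditional snapshot is in place.
 */
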
7067static int tracing_snapshot_release(struct inode *inode, struct file *file)
7068{
7069 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007070 int ret;
7071
7072 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007073
7074 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007075 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007076
7077 /* If write only, the seq_file is just a stub */
7078 if (m)
7079 kfree(m->private);
7080 kfree(m);
7081
7082 return 0;
7083}
7084
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007085static int tracing_buffers_open(struct inode *inode, struct file *filp);
7086static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7087 size_t count, loff_t *ppos);
7088static int tracing_buffers_release(struct inode *inode, struct file *file);
7089static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7090 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7091
7092static int snapshot_raw_open(struct inode *inode, struct file *filp)
7093{
7094 struct ftrace_buffer_info *info;
7095 int ret;
7096
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007097 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007098 ret = tracing_buffers_open(inode, filp);
7099 if (ret < 0)
7100 return ret;
7101
7102 info = filp->private_data;
7103
7104 if (info->iter.trace->use_max_tr) {
7105 tracing_buffers_release(inode, filp);
7106 return -EBUSY;
7107 }
7108
7109 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007110 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007111
7112 return ret;
7113}
7114
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007115#endif /* CONFIG_TRACER_SNAPSHOT */
7116
7117
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007118static const struct file_operations tracing_thresh_fops = {
7119 .open = tracing_open_generic,
7120 .read = tracing_thresh_read,
7121 .write = tracing_thresh_write,
7122 .llseek = generic_file_llseek,
7123};
7124
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007125#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007126static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007127 .open = tracing_open_generic,
7128 .read = tracing_max_lat_read,
7129 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007130 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007131};
Chen Gange428abb2015-11-10 05:15:15 +08007132#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007133
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007134static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007135 .open = tracing_open_generic,
7136 .read = tracing_set_trace_read,
7137 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007138 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007139};
7140
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007141static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007142 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007143 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007144 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007145 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007146 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007147 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007148};
7149
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007150static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007151 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007152 .read = tracing_entries_read,
7153 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007154 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007155 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007156};
7157
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007158static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007159 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007160 .read = tracing_total_entries_read,
7161 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007162 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007163};
7164
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007165static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007166 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007167 .write = tracing_free_buffer_write,
7168 .release = tracing_free_buffer_release,
7169};
7170
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007171static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007172 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007173 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007174 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007175 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007176};
7177
Steven Rostedtfa32e852016-07-06 15:25:08 -04007178static const struct file_operations tracing_mark_raw_fops = {
7179 .open = tracing_open_generic_tr,
7180 .write = tracing_mark_raw_write,
7181 .llseek = generic_file_llseek,
7182 .release = tracing_release_generic_tr,
7183};
7184
Zhaolei5079f322009-08-25 16:12:56 +08007185static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007186 .open = tracing_clock_open,
7187 .read = seq_read,
7188 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007189 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007190 .write = tracing_clock_write,
7191};
7192
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007193static const struct file_operations trace_time_stamp_mode_fops = {
7194 .open = tracing_time_stamp_mode_open,
7195 .read = seq_read,
7196 .llseek = seq_lseek,
7197 .release = tracing_single_release_tr,
7198};
7199
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007200#ifdef CONFIG_TRACER_SNAPSHOT
7201static const struct file_operations snapshot_fops = {
7202 .open = tracing_snapshot_open,
7203 .read = seq_read,
7204 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007205 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007206 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007207};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007208
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007209static const struct file_operations snapshot_raw_fops = {
7210 .open = snapshot_raw_open,
7211 .read = tracing_buffers_read,
7212 .release = tracing_buffers_release,
7213 .splice_read = tracing_buffers_splice_read,
7214 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007215};
7216
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007217#endif /* CONFIG_TRACER_SNAPSHOT */
7218
Tom Zanussi8a062902019-03-31 18:48:15 -05007219#define TRACING_LOG_ERRS_MAX 8
7220#define TRACING_LOG_LOC_MAX 128
7221
7222#define CMD_PREFIX " Command: "
7223
7224struct err_info {
7225 const char **errs; /* ptr to loc-specific array of err strings */
7226 u8 type; /* index into errs -> specific err string */
7227	u8		pos;	/* caret position in cmd; cmd is capped at MAX_FILTER_STR_VAL (256) */
7228 u64 ts;
7229};
7230
7231struct tracing_log_err {
7232 struct list_head list;
7233 struct err_info info;
7234 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7235 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7236};
7237
Tom Zanussi8a062902019-03-31 18:48:15 -05007238static DEFINE_MUTEX(tracing_err_log_lock);
7239
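/*
 * Allocate a fresh error-log entry until TRACING_LOG_ERRS_MAX entries
 * exist; beyond that, recycle the oldest entry on tr->err_log, so the
 * log behaves as a small ring holding only the most recent errors.
 */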
YueHaibingff585c52019-06-14 23:32:10 +08007240static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007241{
7242 struct tracing_log_err *err;
7243
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007244 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007245 err = kzalloc(sizeof(*err), GFP_KERNEL);
7246 if (!err)
7247 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007248 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007249
7250 return err;
7251 }
7252
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007253 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007254 list_del(&err->list);
7255
7256 return err;
7257}
7258
7259/**
7260 * err_pos - find the position of a string within a command for error caret placement
7261 * @cmd: The tracing command that caused the error
7262 * @str: The string to position the caret at within @cmd
7263 *
7264 * Finds the position of the first occurrence of @str within @cmd.  The
7265 * return value can be passed to tracing_log_err() for caret placement
7266 * within @cmd.
7267 *
7268 * Returns the index within @cmd of the first occurrence of @str or 0
7269 * if @str was not found.
7270 */
7271unsigned int err_pos(char *cmd, const char *str)
7272{
7273 char *found;
7274
7275 if (WARN_ON(!strlen(cmd)))
7276 return 0;
7277
7278 found = strstr(cmd, str);
7279 if (found)
7280 return found - cmd;
7281
7282 return 0;
7283}
7284
7285/**
7286 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007287 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007288 * @loc: A string describing where the error occurred
7289 * @cmd: The tracing command that caused the error
7290 * @errs: The array of loc-specific static error strings
7291 * @type: The index into errs[], which produces the specific static err string
7292 * @pos: The position the caret should be placed in the cmd
7293 *
7294 * Writes an error into tracing/error_log of the form:
7295 *
7296 * <loc>: error: <text>
7297 * Command: <cmd>
7298 * ^
7299 *
7300 * tracing/error_log is a small log file containing the last
7301 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7302 * unless there has been a tracing error, and the error log can be
7303 * cleared and have its memory freed by writing the empty string in
7304 * truncation mode to it, i.e. echo > tracing/error_log.
7305 *
7306 * NOTE: the @errs array along with the @type param are used to
7307 * produce a static error string - this string is not copied and saved
7308 * when the error is logged - only a pointer to it is saved. See
7309 * existing callers for examples of how static strings are typically
7310 * defined for use with tracing_log_err().
7311 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007312void tracing_log_err(struct trace_array *tr,
7313 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007314 const char **errs, u8 type, u8 pos)
7315{
7316 struct tracing_log_err *err;
7317
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007318 if (!tr)
7319 tr = &global_trace;
7320
Tom Zanussi8a062902019-03-31 18:48:15 -05007321 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007322 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007323 if (PTR_ERR(err) == -ENOMEM) {
7324 mutex_unlock(&tracing_err_log_lock);
7325 return;
7326 }
7327
7328 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7329	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7330
7331 err->info.errs = errs;
7332 err->info.type = type;
7333 err->info.pos = pos;
7334 err->info.ts = local_clock();
7335
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007336 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007337 mutex_unlock(&tracing_err_log_lock);
7338}
7339
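/*
 * Illustrative caller sketch (hypothetical names, compiled out): a
 * command parser reporting a bad token.  Real callers keep a static
 * array of error strings and use err_pos() for the caret position.
 */
#if 0
static const char *sample_errs[] = { "unknown keyword" };

static void sample_report_err(struct trace_array *tr, char *cmd, char *tok)
{
	/* Logs "<loc>: error: unknown keyword" with a caret under @tok */
	tracing_log_err(tr, "sample: parse", cmd, sample_errs,
			0, err_pos(cmd, tok));
}
#endif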
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007340static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007341{
7342 struct tracing_log_err *err, *next;
7343
7344 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007345 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007346 list_del(&err->list);
7347 kfree(err);
7348 }
7349
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007350 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007351 mutex_unlock(&tracing_err_log_lock);
7352}
7353
7354static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7355{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007356 struct trace_array *tr = m->private;
7357
Tom Zanussi8a062902019-03-31 18:48:15 -05007358 mutex_lock(&tracing_err_log_lock);
7359
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007360 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007361}
7362
7363static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7364{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007365 struct trace_array *tr = m->private;
7366
7367 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007368}
7369
7370static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7371{
7372 mutex_unlock(&tracing_err_log_lock);
7373}
7374
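/* Emit the caret line: pad past CMD_PREFIX and @pos columns, then print '^' */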
7375static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7376{
7377 u8 i;
7378
7379 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7380 seq_putc(m, ' ');
7381 for (i = 0; i < pos; i++)
7382 seq_putc(m, ' ');
7383 seq_puts(m, "^\n");
7384}
7385
7386static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7387{
7388 struct tracing_log_err *err = v;
7389
7390 if (err) {
7391 const char *err_text = err->info.errs[err->info.type];
7392 u64 sec = err->info.ts;
7393 u32 nsec;
7394
7395 nsec = do_div(sec, NSEC_PER_SEC);
7396 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7397 err->loc, err_text);
7398 seq_printf(m, "%s", err->cmd);
7399 tracing_err_log_show_pos(m, err->info.pos);
7400 }
7401
7402 return 0;
7403}
7404
7405static const struct seq_operations tracing_err_log_seq_ops = {
7406 .start = tracing_err_log_seq_start,
7407 .next = tracing_err_log_seq_next,
7408 .stop = tracing_err_log_seq_stop,
7409 .show = tracing_err_log_seq_show
7410};
7411
7412static int tracing_err_log_open(struct inode *inode, struct file *file)
7413{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007414 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007415 int ret = 0;
7416
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007417 ret = tracing_check_open_get_tr(tr);
7418 if (ret)
7419 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007420
Tom Zanussi8a062902019-03-31 18:48:15 -05007421 /* If this file was opened for write, then erase contents */
7422 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007423 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007424
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007425 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007426 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007427 if (!ret) {
7428 struct seq_file *m = file->private_data;
7429 m->private = tr;
7430 } else {
7431 trace_array_put(tr);
7432 }
7433 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007434 return ret;
7435}
7436
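/*
 * Writes to error_log are accepted but discarded; the log is cleared by
 * opening the file with O_TRUNC (e.g. "echo > tracing/error_log").
 */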
7437static ssize_t tracing_err_log_write(struct file *file,
7438 const char __user *buffer,
7439 size_t count, loff_t *ppos)
7440{
7441 return count;
7442}
7443
Takeshi Misawad122ed62019-06-28 19:56:40 +09007444static int tracing_err_log_release(struct inode *inode, struct file *file)
7445{
7446 struct trace_array *tr = inode->i_private;
7447
7448 trace_array_put(tr);
7449
7450 if (file->f_mode & FMODE_READ)
7451 seq_release(inode, file);
7452
7453 return 0;
7454}
7455
Tom Zanussi8a062902019-03-31 18:48:15 -05007456static const struct file_operations tracing_err_log_fops = {
7457 .open = tracing_err_log_open,
7458 .write = tracing_err_log_write,
7459 .read = seq_read,
7460 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007461 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007462};
7463
Steven Rostedt2cadf912008-12-01 22:20:19 -05007464static int tracing_buffers_open(struct inode *inode, struct file *filp)
7465{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007466 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007467 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007468 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007469
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007470 ret = tracing_check_open_get_tr(tr);
7471 if (ret)
7472 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007473
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007474 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007475 if (!info) {
7476 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007477 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007478 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007479
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007480 mutex_lock(&trace_types_lock);
7481
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007482 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007483 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007484 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007485 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007486 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007487 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007488 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007489
7490 filp->private_data = info;
7491
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007492 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007493
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007494 mutex_unlock(&trace_types_lock);
7495
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007496 ret = nonseekable_open(inode, filp);
7497 if (ret < 0)
7498 trace_array_put(tr);
7499
7500 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007501}
7502
Al Viro9dd95742017-07-03 00:42:43 -04007503static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007504tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7505{
7506 struct ftrace_buffer_info *info = filp->private_data;
7507 struct trace_iterator *iter = &info->iter;
7508
7509 return trace_poll(iter, filp, poll_table);
7510}
7511
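/*
 * Read path for trace_pipe_raw: a "spare" page is allocated once per
 * open file and refilled via ring_buffer_read_page(); data is copied
 * out to user space in at most page-sized chunks, and any remainder is
 * kept in the spare page for the next read.
 */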
Steven Rostedt2cadf912008-12-01 22:20:19 -05007512static ssize_t
7513tracing_buffers_read(struct file *filp, char __user *ubuf,
7514 size_t count, loff_t *ppos)
7515{
7516 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007517 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007518 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007519 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007520
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007521 if (!count)
7522 return 0;
7523
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007524#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007525 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7526 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007527#endif
7528
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007529 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007530 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007531 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007532 if (IS_ERR(info->spare)) {
7533 ret = PTR_ERR(info->spare);
7534 info->spare = NULL;
7535 } else {
7536 info->spare_cpu = iter->cpu_file;
7537 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007538 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007539 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007540 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007541
Steven Rostedt2cadf912008-12-01 22:20:19 -05007542 /* Do we have previous read data to read? */
7543 if (info->read < PAGE_SIZE)
7544 goto read;
7545
Steven Rostedtb6273442013-02-28 13:44:11 -05007546 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007547 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007548 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007549 &info->spare,
7550 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007551 iter->cpu_file, 0);
7552 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007553
7554 if (ret < 0) {
7555 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007556 if ((filp->f_flags & O_NONBLOCK))
7557 return -EAGAIN;
7558
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007559 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007560 if (ret)
7561 return ret;
7562
Steven Rostedtb6273442013-02-28 13:44:11 -05007563 goto again;
7564 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007565 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007566 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007567
Steven Rostedt436fc282011-10-14 10:44:25 -04007568 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007569 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007570 size = PAGE_SIZE - info->read;
7571 if (size > count)
7572 size = count;
7573
7574 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007575 if (ret == size)
7576 return -EFAULT;
7577
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007578 size -= ret;
7579
Steven Rostedt2cadf912008-12-01 22:20:19 -05007580 *ppos += size;
7581 info->read += size;
7582
7583 return size;
7584}
7585
7586static int tracing_buffers_release(struct inode *inode, struct file *file)
7587{
7588 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007589 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007590
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007591 mutex_lock(&trace_types_lock);
7592
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007593 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007594
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007595 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007596
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007597 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007598 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007599 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007600 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007601
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007602 mutex_unlock(&trace_types_lock);
7603
Steven Rostedt2cadf912008-12-01 22:20:19 -05007604 return 0;
7605}
7606
7607struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007608 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007609 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007610 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007611 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007612};
7613
Jann Hornb9872222019-04-04 23:59:25 +02007614static void buffer_ref_release(struct buffer_ref *ref)
7615{
7616 if (!refcount_dec_and_test(&ref->refcount))
7617 return;
7618 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7619 kfree(ref);
7620}
7621
Steven Rostedt2cadf912008-12-01 22:20:19 -05007622static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7623 struct pipe_buffer *buf)
7624{
7625 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7626
Jann Hornb9872222019-04-04 23:59:25 +02007627 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007628 buf->private = 0;
7629}
7630
Matthew Wilcox15fab632019-04-05 14:02:10 -07007631static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007632 struct pipe_buffer *buf)
7633{
7634 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7635
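	/* Refuse new references once the count is implausibly high (overflow guard) */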
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007636 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007637 return false;
7638
Jann Hornb9872222019-04-04 23:59:25 +02007639 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007640 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007641}
7642
7643/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007644static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007645 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007646 .get = buffer_pipe_buf_get,
7647};
7648
7649/*
7650 * Callback from splice_to_pipe(), if we need to release some pages
7651 * at the end of the spd in case we errored out while filling the pipe.
7652 */
7653static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7654{
7655 struct buffer_ref *ref =
7656 (struct buffer_ref *)spd->partial[i].private;
7657
Jann Hornb9872222019-04-04 23:59:25 +02007658 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007659 spd->partial[i].private = 0;
7660}
7661
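/*
 * Zero-copy splice path for trace_pipe_raw: whole ring-buffer pages are
 * wrapped in refcounted buffer_refs and handed to the pipe, so nothing
 * is copied until a pipe reader consumes the data.
 */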
7662static ssize_t
7663tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7664 struct pipe_inode_info *pipe, size_t len,
7665 unsigned int flags)
7666{
7667 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007668 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007669 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7670 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007671 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007672 .pages = pages_def,
7673 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007674 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007675 .ops = &buffer_pipe_buf_ops,
7676 .spd_release = buffer_spd_release,
7677 };
7678 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007679 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007680 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007681
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007682#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007683 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7684 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007685#endif
7686
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007687 if (*ppos & (PAGE_SIZE - 1))
7688 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007689
7690 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007691 if (len < PAGE_SIZE)
7692 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007693 len &= PAGE_MASK;
7694 }
7695
Al Viro1ae22932016-09-17 18:31:46 -04007696 if (splice_grow_spd(pipe, &spd))
7697 return -ENOMEM;
7698
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007699 again:
7700 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007701 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007702
Al Viroa786c062014-04-11 12:01:03 -04007703 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007704 struct page *page;
7705 int r;
7706
7707 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007708 if (!ref) {
7709 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007710 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007711 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007712
Jann Hornb9872222019-04-04 23:59:25 +02007713 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007714 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007715 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007716 if (IS_ERR(ref->page)) {
7717 ret = PTR_ERR(ref->page);
7718 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007719 kfree(ref);
7720 break;
7721 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007722 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007723
7724 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007725 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007726 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007727 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7728 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007729 kfree(ref);
7730 break;
7731 }
7732
Steven Rostedt2cadf912008-12-01 22:20:19 -05007733 page = virt_to_page(ref->page);
7734
7735 spd.pages[i] = page;
7736 spd.partial[i].len = PAGE_SIZE;
7737 spd.partial[i].offset = 0;
7738 spd.partial[i].private = (unsigned long)ref;
7739 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007740 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007741
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007742 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007743 }
7744
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007745 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007746 spd.nr_pages = i;
7747
7748 /* did we read anything? */
7749 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007750 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007751 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007752
Al Viro1ae22932016-09-17 18:31:46 -04007753 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007754 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007755 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007756
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007757 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007758 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007759 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007760
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007761 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007762 }
7763
7764 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007765out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007766 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007767
Steven Rostedt2cadf912008-12-01 22:20:19 -05007768 return ret;
7769}
7770
7771static const struct file_operations tracing_buffers_fops = {
7772 .open = tracing_buffers_open,
7773 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007774 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007775 .release = tracing_buffers_release,
7776 .splice_read = tracing_buffers_splice_read,
7777 .llseek = no_llseek,
7778};
7779
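/*
 * Reading per_cpu/cpuN/stats produces output along these lines (values
 * are illustrative only):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5448
 *   oldest event ts: 12587.297869
 *   now ts: 12587.727869
 *   dropped events: 0
 *   read events: 129
 */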
Steven Rostedtc8d77182009-04-29 18:03:45 -04007780static ssize_t
7781tracing_stats_read(struct file *filp, char __user *ubuf,
7782 size_t count, loff_t *ppos)
7783{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007784 struct inode *inode = file_inode(filp);
7785 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007786 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007787 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007788 struct trace_seq *s;
7789 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007790 unsigned long long t;
7791 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007792
Li Zefane4f2d102009-06-15 10:57:28 +08007793 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007794 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007795 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007796
7797 trace_seq_init(s);
7798
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007799 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007800 trace_seq_printf(s, "entries: %ld\n", cnt);
7801
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007802 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007803 trace_seq_printf(s, "overrun: %ld\n", cnt);
7804
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007805 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007806 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7807
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007808 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007809 trace_seq_printf(s, "bytes: %ld\n", cnt);
7810
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007811 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007812 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007813 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007814 usec_rem = do_div(t, USEC_PER_SEC);
7815 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7816 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007817
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007818 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007819 usec_rem = do_div(t, USEC_PER_SEC);
7820 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7821 } else {
7822 /* counter or tsc mode for trace_clock */
7823 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007824 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007825
7826 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007827 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007828 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007829
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007830 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007831 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7832
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007833 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007834 trace_seq_printf(s, "read events: %ld\n", cnt);
7835
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007836 count = simple_read_from_buffer(ubuf, count, ppos,
7837 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007838
7839 kfree(s);
7840
7841 return count;
7842}
7843
7844static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007845 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007846 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007847 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007848 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007849};
7850
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007851#ifdef CONFIG_DYNAMIC_FTRACE
7852
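/*
 * dyn_ftrace_total_info reports the number of patched call sites and the
 * pages/groups holding the dyn_ftrace records, e.g. (illustrative):
 *
 *   68302 pages:272 groups: 9
 */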
7853static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007854tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007855 size_t cnt, loff_t *ppos)
7856{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007857 ssize_t ret;
7858 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007859 int r;
7860
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007861 /* 256 should be plenty to hold the amount needed */
7862 buf = kmalloc(256, GFP_KERNEL);
7863 if (!buf)
7864 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007865
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007866 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7867 ftrace_update_tot_cnt,
7868 ftrace_number_of_pages,
7869 ftrace_number_of_groups);
7870
7871 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7872 kfree(buf);
7873 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007874}
7875
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007876static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007877 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007878 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007879 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007880};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007881#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007882
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007883#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7884static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007885ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007886 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007887 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007888{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007889 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007890}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007891
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007892static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007893ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007894 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007895 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007896{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007897 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007898 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007899
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007900 if (mapper)
7901 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007902
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007903 if (count) {
7905 if (*count <= 0)
7906 return;
7907
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007908 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007909 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007910
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007911 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007912}
7913
7914static int
7915ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7916 struct ftrace_probe_ops *ops, void *data)
7917{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007918 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007919 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007920
7921 seq_printf(m, "%ps:", (void *)ip);
7922
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007923 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007924
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007925 if (mapper)
7926 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7927
7928 if (count)
7929 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007930 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007931 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007932
7933 return 0;
7934}
7935
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007936static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007937ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007938 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007939{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007940 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007941
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007942 if (!mapper) {
7943 mapper = allocate_ftrace_func_mapper();
7944 if (!mapper)
7945 return -ENOMEM;
7946 *data = mapper;
7947 }
7948
7949 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007950}
7951
7952static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007953ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007954 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007955{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007956 struct ftrace_func_mapper *mapper = data;
7957
7958 if (!ip) {
7959 if (!mapper)
7960 return;
7961 free_ftrace_func_mapper(mapper, NULL);
7962 return;
7963 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007964
7965 ftrace_func_mapper_remove_ip(mapper, ip);
7966}
7967
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007968static struct ftrace_probe_ops snapshot_probe_ops = {
7969 .func = ftrace_snapshot,
7970 .print = ftrace_snapshot_print,
7971};
7972
7973static struct ftrace_probe_ops snapshot_count_probe_ops = {
7974 .func = ftrace_count_snapshot,
7975 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007976 .init = ftrace_snapshot_init,
7977 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007978};
7979
7980static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007981ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007982 char *glob, char *cmd, char *param, int enable)
7983{
7984 struct ftrace_probe_ops *ops;
7985 void *count = (void *)-1;
7986 char *number;
7987 int ret;
7988
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007989 if (!tr)
7990 return -ENODEV;
7991
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007992 /* hash funcs only work with set_ftrace_filter */
7993 if (!enable)
7994 return -EINVAL;
7995
7996 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7997
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007998 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007999 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008000
8001 if (!param)
8002 goto out_reg;
8003
8004 number = strsep(&param, ":");
8005
8006 if (!strlen(number))
8007 goto out_reg;
8008
8009 /*
8010 * We use the callback data field (which is a pointer)
8011 * as our counter.
8012 */
8013 ret = kstrtoul(number, 0, (unsigned long *)&count);
8014 if (ret)
8015 return ret;
8016
8017 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008018 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008019 if (ret < 0)
8020 goto out;
8021
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008022 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008023
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008024 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008025 return ret < 0 ? ret : 0;
8026}
8027
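/*
 * Usage through set_ftrace_filter (illustrative; see the ftrace
 * documentation for the authoritative syntax):
 *
 *   echo 'schedule:snapshot' > set_ftrace_filter    # snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter  # only the first 5 hits
 *   echo '!schedule:snapshot' > set_ftrace_filter   # remove the probe
 */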
8028static struct ftrace_func_command ftrace_snapshot_cmd = {
8029 .name = "snapshot",
8030 .func = ftrace_trace_snapshot_callback,
8031};
8032
Tom Zanussi38de93a2013-10-24 08:34:18 -05008033static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008034{
8035 return register_ftrace_command(&ftrace_snapshot_cmd);
8036}
8037#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008038static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008039#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008040
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008041static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008042{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008043 if (WARN_ON(!tr->dir))
8044 return ERR_PTR(-ENODEV);
8045
8046 /* Top directory uses NULL as the parent */
8047 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8048 return NULL;
8049
8050 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008051 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008052}
8053
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008054static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8055{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008056 struct dentry *d_tracer;
8057
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008058 if (tr->percpu_dir)
8059 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008060
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008061 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008062 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008063 return NULL;
8064
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008065 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008066
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008067 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008068 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008069
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008070 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008071}
8072
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008073static struct dentry *
8074trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8075 void *data, long cpu, const struct file_operations *fops)
8076{
8077 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8078
8079 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008080 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008081 return ret;
8082}
8083
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008084static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008085tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008086{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008087 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008088 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008089 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008090
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008091 if (!d_percpu)
8092 return;
8093
Steven Rostedtdd49a382010-10-20 21:51:26 -04008094 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008095 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008096 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008097 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008098 return;
8099 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008100
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008101 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008102 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008103 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008104
8105 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008106 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008107 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008108
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008109 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008110 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008111
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008112 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008113 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008114
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008115 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008116 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008117
8118#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008119 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008120 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008121
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008122 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008123 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008124#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008125}
8126
Steven Rostedt60a11772008-05-12 21:20:44 +02008127#ifdef CONFIG_FTRACE_SELFTEST
8128/* Let selftest have access to static functions in this file */
8129#include "trace_selftest.c"
8130#endif
8131
Steven Rostedt577b7852009-02-26 23:43:05 -05008132static ssize_t
8133trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8134 loff_t *ppos)
8135{
8136 struct trace_option_dentry *topt = filp->private_data;
8137 char *buf;
8138
8139 if (topt->flags->val & topt->opt->bit)
8140 buf = "1\n";
8141 else
8142 buf = "0\n";
8143
8144 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8145}
8146
8147static ssize_t
8148trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8149 loff_t *ppos)
8150{
8151 struct trace_option_dentry *topt = filp->private_data;
8152 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008153 int ret;
8154
Peter Huewe22fe9b52011-06-07 21:58:27 +02008155 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8156 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008157 return ret;
8158
Li Zefan8d18eaa2009-12-08 11:17:06 +08008159 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008160 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008161
8162 if (!!(topt->flags->val & topt->opt->bit) != val) {
8163 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008164 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008165 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008166 mutex_unlock(&trace_types_lock);
8167 if (ret)
8168 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008169 }
8170
8171 *ppos += cnt;
8172
8173 return cnt;
8174}
8175
8176
8177static const struct file_operations trace_options_fops = {
8178 .open = tracing_open_generic,
8179 .read = trace_options_read,
8180 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008181 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008182};
8183
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008184/*
8185 * In order to pass in both the trace_array descriptor as well as the index
8186 * to the flag that the trace option file represents, the trace_array
8187 * has a character array of trace_flags_index[], which holds the index
8188 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8189 * The address of this character array is passed to the flag option file
8190 * read/write callbacks.
8191 *
8192 * In order to extract both the index and the trace_array descriptor,
8193 * get_tr_index() uses the following algorithm.
8194 *
8195 * idx = *ptr;
8196 *
8197 * Dereferencing the pointer yields the index value, since each cell of
8198 * the array stores its own index (remember index[1] == 1).
8199 *
8200 * Then, to get the trace_array descriptor, subtracting that index
8201 * from the pointer gives the start of the array itself.
8202 *
8203 * ptr - idx == &index[0]
8204 *
8205 * Then a simple container_of() from that pointer gets us to the
8206 * trace_array descriptor.
8207 */
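/*
 * Concrete example (illustrative): if data points at trace_flags_index[3],
 * then *pindex == 3 and data - 3 == &trace_flags_index[0], from which
 * container_of() recovers the enclosing trace_array.
 */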
8208static void get_tr_index(void *data, struct trace_array **ptr,
8209 unsigned int *pindex)
8210{
8211 *pindex = *(unsigned char *)data;
8212
8213 *ptr = container_of(data - *pindex, struct trace_array,
8214 trace_flags_index);
8215}
8216
Steven Rostedta8259072009-02-26 22:19:12 -05008217static ssize_t
8218trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8219 loff_t *ppos)
8220{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008221 void *tr_index = filp->private_data;
8222 struct trace_array *tr;
8223 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008224 char *buf;
8225
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008226 get_tr_index(tr_index, &tr, &index);
8227
8228 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008229 buf = "1\n";
8230 else
8231 buf = "0\n";
8232
8233 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8234}
8235
8236static ssize_t
8237trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8238 loff_t *ppos)
8239{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008240 void *tr_index = filp->private_data;
8241 struct trace_array *tr;
8242 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008243 unsigned long val;
8244 int ret;
8245
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008246 get_tr_index(tr_index, &tr, &index);
8247
Peter Huewe22fe9b52011-06-07 21:58:27 +02008248 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8249 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008250 return ret;
8251
Zhaoleif2d84b62009-08-07 18:55:48 +08008252 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008253 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008254
Prateek Sood3a53acf2019-12-10 09:15:16 +00008255 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008256 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008257 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008258 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008259 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008260
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008261 if (ret < 0)
8262 return ret;
8263
Steven Rostedta8259072009-02-26 22:19:12 -05008264 *ppos += cnt;
8265
8266 return cnt;
8267}
8268
Steven Rostedta8259072009-02-26 22:19:12 -05008269static const struct file_operations trace_options_core_fops = {
8270 .open = tracing_open_generic,
8271 .read = trace_options_core_read,
8272 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008273 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008274};
8275
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008276struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008277 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008278 struct dentry *parent,
8279 void *data,
8280 const struct file_operations *fops)
8281{
8282 struct dentry *ret;
8283
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008284 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008285 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008286 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008287
8288 return ret;
8289}
8290
8291
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008292static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008293{
8294 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008295
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008296 if (tr->options)
8297 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008298
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008299 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008300 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008301 return NULL;
8302
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008303 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008304 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008305 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008306 return NULL;
8307 }
8308
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008309 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008310}
8311
Steven Rostedt577b7852009-02-26 23:43:05 -05008312static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008313create_trace_option_file(struct trace_array *tr,
8314 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008315 struct tracer_flags *flags,
8316 struct tracer_opt *opt)
8317{
8318 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008319
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008320 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008321 if (!t_options)
8322 return;
8323
8324 topt->flags = flags;
8325 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008326 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008327
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008328 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008329 &trace_options_fops);
8330
Steven Rostedt577b7852009-02-26 23:43:05 -05008331}
8332
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008333static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008334create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008335{
8336 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008337 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008338 struct tracer_flags *flags;
8339 struct tracer_opt *opts;
8340 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008341 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008342
8343 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008344 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008345
8346 flags = tracer->flags;
8347
8348 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008349 return;
8350
8351 /*
8352 * If this is an instance, only create flags for tracers
8353 * the instance may have.
8354 */
8355 if (!trace_ok_for_array(tracer, tr))
8356 return;
8357
8358 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008359		/* Make sure there are no duplicate flags. */
8360 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008361 return;
8362 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008363
8364 opts = flags->opts;
8365
8366 for (cnt = 0; opts[cnt].name; cnt++)
8367 ;
8368
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008369 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008370 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008371 return;
8372
8373 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8374 GFP_KERNEL);
8375 if (!tr_topts) {
8376 kfree(topts);
8377 return;
8378 }
8379
8380 tr->topts = tr_topts;
8381 tr->topts[tr->nr_topts].tracer = tracer;
8382 tr->topts[tr->nr_topts].topts = topts;
8383 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008384
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008385 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008386 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008387 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008388 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008389 "Failed to create trace option: %s",
8390 opts[cnt].name);
8391 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008392}
8393
Steven Rostedta8259072009-02-26 22:19:12 -05008394static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008395create_trace_option_core_file(struct trace_array *tr,
8396 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008397{
8398 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008399
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008400 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008401 if (!t_options)
8402 return NULL;
8403
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008404 return trace_create_file(option, 0644, t_options,
8405 (void *)&tr->trace_flags_index[index],
8406 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008407}
8408
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008409static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008410{
8411 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008412 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008413 int i;
8414
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008415 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008416 if (!t_options)
8417 return;
8418
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008419 for (i = 0; trace_options[i]; i++) {
8420 if (top_level ||
8421 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8422 create_trace_option_core_file(tr, trace_options[i], i);
8423 }
Steven Rostedta8259072009-02-26 22:19:12 -05008424}
8425
Steven Rostedt499e5472012-02-22 15:50:28 -05008426static ssize_t
8427rb_simple_read(struct file *filp, char __user *ubuf,
8428 size_t cnt, loff_t *ppos)
8429{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008430 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008431 char buf[64];
8432 int r;
8433
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008434 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008435 r = sprintf(buf, "%d\n", r);
8436
8437 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8438}
8439
8440static ssize_t
8441rb_simple_write(struct file *filp, const char __user *ubuf,
8442 size_t cnt, loff_t *ppos)
8443{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008444 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008445 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008446 unsigned long val;
8447 int ret;
8448
8449 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8450 if (ret)
8451 return ret;
8452
8453 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008454 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008455 if (!!val == tracer_tracing_is_on(tr)) {
8456 val = 0; /* do nothing */
8457 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008458 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008459 if (tr->current_trace->start)
8460 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008461 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008462 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008463 if (tr->current_trace->stop)
8464 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008465 }
8466 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008467 }
8468
8469 (*ppos)++;
8470
8471 return cnt;
8472}
8473
8474static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008475 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008476 .read = rb_simple_read,
8477 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008478 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008479 .llseek = default_llseek,
8480};
8481
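/*
 * rb_simple_read()/rb_simple_write() above back the per-instance
 * "tracing_on" file created in init_tracer_tracefs() below. A usage
 * sketch, assuming the standard tracefs mount point:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	   (stop recording)
 *	echo 1 > /sys/kernel/tracing/tracing_on	   (resume recording)
 *	cat /sys/kernel/tracing/tracing_on	   (prints 0 or 1)
 *
 * Writing the value that is already set is a no-op, per the
 * tracer_tracing_is_on() check above.
 */
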
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008482static ssize_t
8483buffer_percent_read(struct file *filp, char __user *ubuf,
8484 size_t cnt, loff_t *ppos)
8485{
8486 struct trace_array *tr = filp->private_data;
8487 char buf[64];
8488 int r;
8489
8490 r = tr->buffer_percent;
8491 r = sprintf(buf, "%d\n", r);
8492
8493 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8494}
8495
8496static ssize_t
8497buffer_percent_write(struct file *filp, const char __user *ubuf,
8498 size_t cnt, loff_t *ppos)
8499{
8500 struct trace_array *tr = filp->private_data;
8501 unsigned long val;
8502 int ret;
8503
8504 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8505 if (ret)
8506 return ret;
8507
8508 if (val > 100)
8509 return -EINVAL;
8510
8511 if (!val)
8512 val = 1;
8513
8514 tr->buffer_percent = val;
8515
8516 (*ppos)++;
8517
8518 return cnt;
8519}
8520
8521static const struct file_operations buffer_percent_fops = {
8522 .open = tracing_open_generic_tr,
8523 .read = buffer_percent_read,
8524 .write = buffer_percent_write,
8525 .release = tracing_release_generic_tr,
8526 .llseek = default_llseek,
8527};
8528
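/*
 * A usage sketch for the "buffer_percent" file backed by the handlers
 * above (created in init_tracer_tracefs() below with a default of 50).
 * Writes above 100 are rejected with -EINVAL, and a write of 0 is
 * bumped to 1:
 *
 *	echo 100 > /sys/kernel/tracing/buffer_percent
 *	cat /sys/kernel/tracing/buffer_percent
 */
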
YueHaibingff585c52019-06-14 23:32:10 +08008529static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008530
8531static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008532init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008533
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008534static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008535allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008536{
8537 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008538
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008539 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008540
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008541 buf->tr = tr;
8542
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008543 buf->buffer = ring_buffer_alloc(size, rb_flags);
8544 if (!buf->buffer)
8545 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008546
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008547 buf->data = alloc_percpu(struct trace_array_cpu);
8548 if (!buf->data) {
8549 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008550 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008551 return -ENOMEM;
8552 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008553
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008554 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008555 set_buffer_entries(&tr->array_buffer,
8556 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008557
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008558 return 0;
8559}
8560
8561static int allocate_trace_buffers(struct trace_array *tr, int size)
8562{
8563 int ret;
8564
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008565 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008566 if (ret)
8567 return ret;
8568
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008569#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008570 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8571 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008572 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008573 ring_buffer_free(tr->array_buffer.buffer);
8574 tr->array_buffer.buffer = NULL;
8575 free_percpu(tr->array_buffer.data);
8576 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008577 return -ENOMEM;
8578 }
8579 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008580
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008581 /*
8582 * Only the top level trace array gets its snapshot allocated
8583 * from the kernel command line.
8584 */
8585 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008586#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04008587
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008588 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008589}
8590
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008591static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008592{
8593 if (buf->buffer) {
8594 ring_buffer_free(buf->buffer);
8595 buf->buffer = NULL;
8596 free_percpu(buf->data);
8597 buf->data = NULL;
8598 }
8599}
8600
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008601static void free_trace_buffers(struct trace_array *tr)
8602{
8603 if (!tr)
8604 return;
8605
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008606 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008607
8608#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008609 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008610#endif
8611}
8612
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008613static void init_trace_flags_index(struct trace_array *tr)
8614{
8615 int i;
8616
8617 /* Used by the trace options files */
8618 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8619 tr->trace_flags_index[i] = i;
8620}
8621
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008622static void __update_tracer_options(struct trace_array *tr)
8623{
8624 struct tracer *t;
8625
8626 for (t = trace_types; t; t = t->next)
8627 add_tracer_options(tr, t);
8628}
8629
8630static void update_tracer_options(struct trace_array *tr)
8631{
8632 mutex_lock(&trace_types_lock);
8633 __update_tracer_options(tr);
8634 mutex_unlock(&trace_types_lock);
8635}
8636
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008637/* Must have trace_types_lock held */
8638struct trace_array *trace_array_find(const char *instance)
8639{
8640 struct trace_array *tr, *found = NULL;
8641
8642 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8643 if (tr->name && strcmp(tr->name, instance) == 0) {
8644 found = tr;
8645 break;
8646 }
8647 }
8648
8649 return found;
8650}
8651
8652struct trace_array *trace_array_find_get(const char *instance)
8653{
8654 struct trace_array *tr;
8655
8656 mutex_lock(&trace_types_lock);
8657 tr = trace_array_find(instance);
8658 if (tr)
8659 tr->ref++;
8660 mutex_unlock(&trace_types_lock);
8661
8662 return tr;
8663}
8664
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008665static int trace_array_create_dir(struct trace_array *tr)
8666{
8667 int ret;
8668
8669 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8670 if (!tr->dir)
8671 return -EINVAL;
8672
8673 ret = event_trace_add_tracer(tr->dir, tr);
8674 if (ret)
8675 tracefs_remove(tr->dir);
8676
8677 init_tracer_tracefs(tr, tr->dir);
8678 __update_tracer_options(tr);
8679
8680 return ret;
8681}
8682
Divya Indi28879782019-11-20 11:08:38 -08008683static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008684{
Steven Rostedt277ba042012-08-03 16:10:49 -04008685 struct trace_array *tr;
8686 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008687
Steven Rostedt277ba042012-08-03 16:10:49 -04008688 ret = -ENOMEM;
8689 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8690 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008691 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008692
8693 tr->name = kstrdup(name, GFP_KERNEL);
8694 if (!tr->name)
8695 goto out_free_tr;
8696
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008697 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8698 goto out_free_tr;
8699
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008700 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008701
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008702 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8703
Steven Rostedt277ba042012-08-03 16:10:49 -04008704 raw_spin_lock_init(&tr->start_lock);
8705
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008706 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8707
Steven Rostedt277ba042012-08-03 16:10:49 -04008708 tr->current_trace = &nop_trace;
8709
8710 INIT_LIST_HEAD(&tr->systems);
8711 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008712 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008713 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008714
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008715 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008716 goto out_free_tr;
8717
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008718 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008719 goto out_free_tr;
8720
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008721 ftrace_init_trace_array(tr);
8722
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008723 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008724
8725 if (trace_instance_dir) {
8726 ret = trace_array_create_dir(tr);
8727 if (ret)
8728 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09008729 } else
8730 __trace_early_add_events(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008731
8732 list_add(&tr->list, &ftrace_trace_arrays);
8733
Divya Indi28879782019-11-20 11:08:38 -08008734 tr->ref++;
8735
Divya Indif45d1222019-03-20 11:28:51 -07008736 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008737
8738 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008739 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008740 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008741 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008742 kfree(tr->name);
8743 kfree(tr);
8744
Divya Indif45d1222019-03-20 11:28:51 -07008745 return ERR_PTR(ret);
8746}
Steven Rostedt277ba042012-08-03 16:10:49 -04008747
Divya Indif45d1222019-03-20 11:28:51 -07008748static int instance_mkdir(const char *name)
8749{
Divya Indi28879782019-11-20 11:08:38 -08008750 struct trace_array *tr;
8751 int ret;
8752
8753 mutex_lock(&event_mutex);
8754 mutex_lock(&trace_types_lock);
8755
8756 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008757 if (trace_array_find(name))
8758 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08008759
8760 tr = trace_array_create(name);
8761
8762 ret = PTR_ERR_OR_ZERO(tr);
8763
8764out_unlock:
8765 mutex_unlock(&trace_types_lock);
8766 mutex_unlock(&event_mutex);
8767 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008768}
8769
Divya Indi28879782019-11-20 11:08:38 -08008770/**
8771 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8772 * @name: The name of the trace array to be looked up/created.
8773 *
8774 * Returns a pointer to the trace array with the given name, or
8775 * NULL if it cannot be created.
8776 *
8777 * NOTE: This function increments the reference counter associated with the
8778 * trace array returned. This makes sure it cannot be freed while in use.
8779 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05008780 * If the trace_array is to be freed, trace_array_destroy() needs to
8781 * be called after the trace_array_put(), or simply let user space delete
8782 * it from the tracefs instances directory. But until the
8783 * trace_array_put() is called, user space can not delete it.
Divya Indi28879782019-11-20 11:08:38 -08008784 *
8785 */
8786struct trace_array *trace_array_get_by_name(const char *name)
8787{
8788 struct trace_array *tr;
8789
8790 mutex_lock(&event_mutex);
8791 mutex_lock(&trace_types_lock);
8792
8793 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8794 if (tr->name && strcmp(tr->name, name) == 0)
8795 goto out_unlock;
8796 }
8797
8798 tr = trace_array_create(name);
8799
8800 if (IS_ERR(tr))
8801 tr = NULL;
8802out_unlock:
8803 if (tr)
8804 tr->ref++;
8805
8806 mutex_unlock(&trace_types_lock);
8807 mutex_unlock(&event_mutex);
8808 return tr;
8809}
8810EXPORT_SYMBOL_GPL(trace_array_get_by_name);
8811
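/*
 * A hedged in-kernel usage sketch for trace_array_get_by_name();
 * "my_instance" is an illustrative name, not defined anywhere. Per the
 * kernel-doc above, the reference must be dropped with
 * trace_array_put(), after which trace_array_destroy() may be used if
 * the instance should go away entirely:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */
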
Divya Indif45d1222019-03-20 11:28:51 -07008812static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008813{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008814 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008815
Divya Indi28879782019-11-20 11:08:38 -08008816 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008817 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07008818 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008819
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008820 list_del(&tr->list);
8821
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008822 /* Disable all the flags that were enabled coming in */
8823 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8824 if ((1 << i) & ZEROED_TRACE_FLAGS)
8825 set_tracer_flag(tr, 1 << i, 0);
8826 }
8827
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008828 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308829 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008830 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008831 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008832 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05008833 tracefs_remove(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008834 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008835
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008836 for (i = 0; i < tr->nr_topts; i++) {
8837 kfree(tr->topts[i].topts);
8838 }
8839 kfree(tr->topts);
8840
Chunyu Hudb9108e02017-07-20 18:36:09 +08008841 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008842 kfree(tr->name);
8843 kfree(tr);
8844
Divya Indif45d1222019-03-20 11:28:51 -07008845 return 0;
8846}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008847
Divya Indie585e642019-08-14 10:55:24 -07008848int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008849{
Divya Indie585e642019-08-14 10:55:24 -07008850 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07008851 int ret;
8852
Divya Indie585e642019-08-14 10:55:24 -07008853 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07008854 return -EINVAL;
8855
8856 mutex_lock(&event_mutex);
8857 mutex_lock(&trace_types_lock);
8858
Divya Indie585e642019-08-14 10:55:24 -07008859 ret = -ENODEV;
8860
8861 /* Making sure trace array exists before destroying it. */
8862 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8863 if (tr == this_tr) {
8864 ret = __remove_instance(tr);
8865 break;
8866 }
8867 }
Divya Indif45d1222019-03-20 11:28:51 -07008868
8869 mutex_unlock(&trace_types_lock);
8870 mutex_unlock(&event_mutex);
8871
8872 return ret;
8873}
8874EXPORT_SYMBOL_GPL(trace_array_destroy);
8875
8876static int instance_rmdir(const char *name)
8877{
8878 struct trace_array *tr;
8879 int ret;
8880
8881 mutex_lock(&event_mutex);
8882 mutex_lock(&trace_types_lock);
8883
8884 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008885 tr = trace_array_find(name);
8886 if (tr)
8887 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008888
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008889 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008890 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008891
8892 return ret;
8893}
8894
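/*
 * A user-space sketch of the "instances" interface wired up in
 * create_trace_instances() below, assuming the standard tracefs mount
 * point:
 *
 *	mkdir /sys/kernel/tracing/instances/foo	   (calls instance_mkdir())
 *	rmdir /sys/kernel/tracing/instances/foo	   (calls instance_rmdir())
 *
 * The rmdir fails with -EBUSY while the instance is still referenced,
 * as checked at the top of __remove_instance().
 */
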
Steven Rostedt277ba042012-08-03 16:10:49 -04008895static __init void create_trace_instances(struct dentry *d_tracer)
8896{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008897 struct trace_array *tr;
8898
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008899 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8900 instance_mkdir,
8901 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008902 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04008903 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008904
8905 mutex_lock(&event_mutex);
8906 mutex_lock(&trace_types_lock);
8907
8908 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8909 if (!tr->name)
8910 continue;
8911 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8912 "Failed to create instance directory\n"))
8913 break;
8914 }
8915
8916 mutex_unlock(&trace_types_lock);
8917 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008918}
8919
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008920static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008921init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008922{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008923 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008924 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008925
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008926 trace_create_file("available_tracers", 0444, d_tracer,
8927 tr, &show_traces_fops);
8928
8929 trace_create_file("current_tracer", 0644, d_tracer,
8930 tr, &set_tracer_fops);
8931
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008932 trace_create_file("tracing_cpumask", 0644, d_tracer,
8933 tr, &tracing_cpumask_fops);
8934
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008935 trace_create_file("trace_options", 0644, d_tracer,
8936 tr, &tracing_iter_fops);
8937
8938 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008939 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008940
8941 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008942 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008943
8944 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008945 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008946
8947 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8948 tr, &tracing_total_entries_fops);
8949
Wang YanQing238ae932013-05-26 16:52:01 +08008950 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008951 tr, &tracing_free_buffer_fops);
8952
8953 trace_create_file("trace_marker", 0220, d_tracer,
8954 tr, &tracing_mark_fops);
8955
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008956 file = __find_event_file(tr, "ftrace", "print");
8957 if (file && file->dir)
8958 trace_create_file("trigger", 0644, file->dir, file,
8959 &event_trigger_fops);
8960 tr->trace_marker_file = file;
8961
Steven Rostedtfa32e852016-07-06 15:25:08 -04008962 trace_create_file("trace_marker_raw", 0220, d_tracer,
8963 tr, &tracing_mark_raw_fops);
8964
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008965 trace_create_file("trace_clock", 0644, d_tracer, tr,
8966 &trace_clock_fops);
8967
8968 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008969 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008970
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008971 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8972 &trace_time_stamp_mode_fops);
8973
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008974 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008975
8976 trace_create_file("buffer_percent", 0444, d_tracer,
8977 tr, &buffer_percent_fops);
8978
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008979 create_trace_options_dir(tr);
8980
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008981#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02008982 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008983#endif
8984
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008985 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008986 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008987
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008988#ifdef CONFIG_TRACER_SNAPSHOT
8989 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008990 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008991#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008992
Tom Zanussi8a062902019-03-31 18:48:15 -05008993 trace_create_file("error_log", 0644, d_tracer,
8994 tr, &tracing_err_log_fops);
8995
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008996 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008997 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008998
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04008999 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009000}
9001
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009002static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009003{
9004 struct vfsmount *mnt;
9005 struct file_system_type *type;
9006
9007 /*
9008 * To maintain backward compatibility for tools that mount
9009 * debugfs to get to the tracing facility, tracefs is automatically
9010 * mounted to the debugfs/tracing directory.
9011 */
9012 type = get_fs_type("tracefs");
9013 if (!type)
9014 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009015 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009016 put_filesystem(type);
9017 if (IS_ERR(mnt))
9018 return NULL;
9019 mntget(mnt);
9020
9021 return mnt;
9022}
9023
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009024/**
9025 * tracing_init_dentry - initialize top level trace array
9026 *
9027 * This is called when creating files or directories in the tracing
9028 * directory. It is called via fs_initcall() by any of the boot up code
9029 * and expects to return the dentry of the top level tracing directory.
9030 */
Wei Yang22c36b12020-07-12 09:10:36 +08009031int tracing_init_dentry(void)
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009032{
9033 struct trace_array *tr = &global_trace;
9034
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009035 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009036 pr_warn("Tracing disabled due to lockdown\n");
Wei Yang22c36b12020-07-12 09:10:36 +08009037 return -EPERM;
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009038 }
9039
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009040 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009041 if (tr->dir)
Wei Yang22c36b12020-07-12 09:10:36 +08009042 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009043
Peter Enderborg072e1332020-07-16 09:15:10 +02009044 if (WARN_ON(!tracefs_initialized()))
Wei Yang22c36b12020-07-12 09:10:36 +08009045 return -ENODEV;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009046
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009047 /*
9048 * As there may still be users that expect the tracing
9049 * files to exist in debugfs/tracing, we must automount
9050 * the tracefs file system there, so older tools still
9051 * work with the newer kernel.
9052 */
9053 tr->dir = debugfs_create_automount("tracing", NULL,
9054 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009055
Wei Yang22c36b12020-07-12 09:10:36 +08009056 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009057}
9058
Jeremy Linton00f4b652017-05-31 16:56:43 -05009059extern struct trace_eval_map *__start_ftrace_eval_maps[];
9060extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009061
Jeremy Linton5f60b352017-05-31 16:56:47 -05009062static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009063{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009064 int len;
9065
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009066 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009067 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009068}
9069
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009070#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009071static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009072{
Jeremy Linton99be6472017-05-31 16:56:44 -05009073 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009074 return;
9075
9076 /*
9077 * Modules with bad taint do not have events created, do
9078 * not bother with enums either.
9079 */
9080 if (trace_module_has_bad_taint(mod))
9081 return;
9082
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009083 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009084}
9085
Jeremy Linton681bec02017-05-31 16:56:53 -05009086#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009087static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009088{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009089 union trace_eval_map_item *map;
9090 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009091
Jeremy Linton99be6472017-05-31 16:56:44 -05009092 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009093 return;
9094
Jeremy Linton1793ed92017-05-31 16:56:46 -05009095 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009096
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009097 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009098
9099 while (map) {
9100 if (map->head.mod == mod)
9101 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009102 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009103 last = &map->tail.next;
9104 map = map->tail.next;
9105 }
9106 if (!map)
9107 goto out;
9108
Jeremy Linton5f60b352017-05-31 16:56:47 -05009109 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009110 kfree(map);
9111 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009112 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009113}
9114#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009115static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009116#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009117
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009118static int trace_module_notify(struct notifier_block *self,
9119 unsigned long val, void *data)
9120{
9121 struct module *mod = data;
9122
9123 switch (val) {
9124 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009125 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009126 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009127 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009128 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009129 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009130 }
9131
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02009132 return NOTIFY_OK;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009133}
9134
9135static struct notifier_block trace_module_nb = {
9136 .notifier_call = trace_module_notify,
9137 .priority = 0,
9138};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009139#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009140
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009141static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009142{
Wei Yang22c36b12020-07-12 09:10:36 +08009143 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009144
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009145 trace_access_lock_init();
9146
Wei Yang22c36b12020-07-12 09:10:36 +08009147 ret = tracing_init_dentry();
9148 if (ret)
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009149 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009150
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009151 event_trace_init();
9152
Wei Yang22c36b12020-07-12 09:10:36 +08009153 init_tracer_tracefs(&global_trace, NULL);
9154 ftrace_init_tracefs_toplevel(&global_trace, NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009155
Wei Yang22c36b12020-07-12 09:10:36 +08009156 trace_create_file("tracing_thresh", 0644, NULL,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009157 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009158
Wei Yang22c36b12020-07-12 09:10:36 +08009159 trace_create_file("README", 0444, NULL,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009160 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009161
Wei Yang22c36b12020-07-12 09:10:36 +08009162 trace_create_file("saved_cmdlines", 0444, NULL,
Avadh Patel69abe6a2009-04-10 16:04:48 -04009163 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009164
Wei Yang22c36b12020-07-12 09:10:36 +08009165 trace_create_file("saved_cmdlines_size", 0644, NULL,
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009166 NULL, &tracing_saved_cmdlines_size_fops);
9167
Wei Yang22c36b12020-07-12 09:10:36 +08009168 trace_create_file("saved_tgids", 0444, NULL,
Michael Sartain99c621d2017-07-05 22:07:15 -06009169 NULL, &tracing_saved_tgids_fops);
9170
Jeremy Linton5f60b352017-05-31 16:56:47 -05009171 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009172
Wei Yang22c36b12020-07-12 09:10:36 +08009173 trace_create_eval_file(NULL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009174
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009175#ifdef CONFIG_MODULES
9176 register_module_notifier(&trace_module_nb);
9177#endif
9178
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009179#ifdef CONFIG_DYNAMIC_FTRACE
Wei Yang22c36b12020-07-12 09:10:36 +08009180 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009181 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009182#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009183
Wei Yang22c36b12020-07-12 09:10:36 +08009184 create_trace_instances(NULL);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009185
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009186 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009187
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009188 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009189}
9190
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009191static int trace_panic_handler(struct notifier_block *this,
9192 unsigned long event, void *unused)
9193{
Steven Rostedt944ac422008-10-23 19:26:08 -04009194 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009195 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009196 return NOTIFY_OK;
9197}
9198
9199static struct notifier_block trace_panic_notifier = {
9200 .notifier_call = trace_panic_handler,
9201 .next = NULL,
9202 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9203};
9204
9205static int trace_die_handler(struct notifier_block *self,
9206 unsigned long val,
9207 void *data)
9208{
9209 switch (val) {
9210 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009211 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009212 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009213 break;
9214 default:
9215 break;
9216 }
9217 return NOTIFY_OK;
9218}
9219
9220static struct notifier_block trace_die_notifier = {
9221 .notifier_call = trace_die_handler,
9222 .priority = 200
9223};
9224
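/*
 * The panic and die notifiers above are what honor the
 * "ftrace_dump_on_oops" kernel command-line parameter; a boot-time
 * usage sketch:
 *
 *	ftrace_dump_on_oops		(dump every CPU's buffer on oops)
 *	ftrace_dump_on_oops=orig_cpu	(dump only the oopsing CPU's buffer)
 */
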
9225/*
9226 * printk is set to max of 1024, we really don't need it that big.
9227 * Nothing should be printing 1000 characters anyway.
9228 */
9229#define TRACE_MAX_PRINT 1000
9230
9231/*
9232 * Define here KERN_TRACE so that we have one place to modify
9233 * it if we decide to change what log level the ftrace dump
9234 * should be at.
9235 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009236#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009237
Jason Wessel955b61e2010-08-05 09:22:23 -05009238void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009239trace_printk_seq(struct trace_seq *s)
9240{
9241 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009242 if (s->seq.len >= TRACE_MAX_PRINT)
9243 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009244
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009245 /*
9246 * More paranoid code. Although the buffer size is set to
9247 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9248 * an extra layer of protection.
9249 */
9250 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9251 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009252
9253 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009254 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009255
9256 printk(KERN_TRACE "%s", s->buffer);
9257
Steven Rostedtf9520752009-03-02 14:04:40 -05009258 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009259}
9260
Jason Wessel955b61e2010-08-05 09:22:23 -05009261void trace_init_global_iter(struct trace_iterator *iter)
9262{
9263 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009264 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009265 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009266 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009267
9268 if (iter->trace && iter->trace->open)
9269 iter->trace->open(iter);
9270
9271 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009272 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009273 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9274
9275 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9276 if (trace_clocks[iter->tr->clock_id].in_ns)
9277 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009278}
9279
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009280void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009281{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009282 /* use static because iter can be a bit big for the stack */
9283 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009284 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009285 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009286 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009287 unsigned long flags;
9288 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009289
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009290 /* Only allow one dump user at a time. */
9291 if (atomic_inc_return(&dump_running) != 1) {
9292 atomic_dec(&dump_running);
9293 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009294 }
9295
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009296 /*
9297 * Always turn off tracing when we dump.
9298 * We don't need to show trace output of what happens
9299 * between multiple crashes.
9300 *
9301 * If the user does a sysrq-z, then they can re-enable
9302 * tracing with echo 1 > tracing_on.
9303 */
9304 tracing_off();
9305
9306 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009307 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009308
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009309 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009310 trace_init_global_iter(&iter);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009311	/* Cannot use kmalloc for iter.temp */
9312 iter.temp = static_temp_buf;
9313 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009314
Steven Rostedtd7690412008-10-01 00:29:53 -04009315 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009316 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009317 }
9318
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009319 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009320
Török Edwinb54d3de2008-11-22 13:28:48 +02009321 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009322 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009323
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009324 switch (oops_dump_mode) {
9325 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009326 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009327 break;
9328 case DUMP_ORIG:
9329 iter.cpu_file = raw_smp_processor_id();
9330 break;
9331 case DUMP_NONE:
9332 goto out_enable;
9333 default:
9334 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009335 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009336 }
9337
9338 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009339
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009340 /* Did function tracer already get disabled? */
9341 if (ftrace_is_dead()) {
9342 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9343 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9344 }
9345
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009346 /*
Randy Dunlap5c8c2062020-08-06 20:32:59 -07009347	 * We need to stop all tracing on all CPUs to read
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009348	 * the next buffer. This is a bit expensive, but is
9349	 * not done often. We read everything we can,
9350 * and then release the locks again.
9351 */
9352
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009353 while (!trace_empty(&iter)) {
9354
9355 if (!cnt)
9356 printk(KERN_TRACE "---------------------------------\n");
9357
9358 cnt++;
9359
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009360 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009361 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009362
Jason Wessel955b61e2010-08-05 09:22:23 -05009363 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009364 int ret;
9365
9366 ret = print_trace_line(&iter);
9367 if (ret != TRACE_TYPE_NO_CONSUME)
9368 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009369 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009370 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009371
9372 trace_printk_seq(&iter.seq);
9373 }
9374
9375 if (!cnt)
9376 printk(KERN_TRACE " (ftrace buffer empty)\n");
9377 else
9378 printk(KERN_TRACE "---------------------------------\n");
9379
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009380 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009381 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009382
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009383 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009384 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009385 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009386 atomic_dec(&dump_running);
9387 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009388 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009389}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009390EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009391
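/*
 * An in-kernel usage sketch for ftrace_dump(). It is meant to be
 * callable from crash paths; only one dump may run at a time (see
 * dump_running above), and tracing is left disabled afterwards unless
 * re-enabled via tracing_on:
 *
 *	ftrace_dump(DUMP_ALL);		(every CPU's buffer)
 *	ftrace_dump(DUMP_ORIG);		(only the current CPU's buffer)
 */
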
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009392int trace_run_command(const char *buf, int (*createfn)(int, char **))
9393{
9394 char **argv;
9395 int argc, ret;
9396
9397 argc = 0;
9398 ret = 0;
9399 argv = argv_split(GFP_KERNEL, buf, &argc);
9400 if (!argv)
9401 return -ENOMEM;
9402
9403 if (argc)
9404 ret = createfn(argc, argv);
9405
9406 argv_free(argv);
9407
9408 return ret;
9409}
9410
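/*
 * A sketch of trace_run_command() usage with a hypothetical callback:
 * the buffer is split on whitespace and the resulting argv is handed
 * to createfn.
 *
 *	static int my_createfn(int argc, char **argv)
 *	{
 *		pr_info("%d args, first: %s\n", argc, argv[0]);
 *		return 0;
 *	}
 *
 *	ret = trace_run_command("p:myprobe do_sys_open", my_createfn);
 */
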
9411#define WRITE_BUFSIZE 4096
9412
9413ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9414 size_t count, loff_t *ppos,
9415 int (*createfn)(int, char **))
9416{
9417 char *kbuf, *buf, *tmp;
9418 int ret = 0;
9419 size_t done = 0;
9420 size_t size;
9421
9422 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9423 if (!kbuf)
9424 return -ENOMEM;
9425
9426 while (done < count) {
9427 size = count - done;
9428
9429 if (size >= WRITE_BUFSIZE)
9430 size = WRITE_BUFSIZE - 1;
9431
9432 if (copy_from_user(kbuf, buffer + done, size)) {
9433 ret = -EFAULT;
9434 goto out;
9435 }
9436 kbuf[size] = '\0';
9437 buf = kbuf;
9438 do {
9439 tmp = strchr(buf, '\n');
9440 if (tmp) {
9441 *tmp = '\0';
9442 size = tmp - buf + 1;
9443 } else {
9444 size = strlen(buf);
9445 if (done + size < count) {
9446 if (buf != kbuf)
9447 break;
9448 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9449 pr_warn("Line length is too long: Should be less than %d\n",
9450 WRITE_BUFSIZE - 2);
9451 ret = -EINVAL;
9452 goto out;
9453 }
9454 }
9455 done += size;
9456
9457 /* Remove comments */
9458 tmp = strchr(buf, '#');
9459
9460 if (tmp)
9461 *tmp = '\0';
9462
9463 ret = trace_run_command(buf, createfn);
9464 if (ret)
9465 goto out;
9466 buf += size;
9467
9468 } while (done < count);
9469 }
9470 ret = done;
9471
9472out:
9473 kfree(kbuf);
9474
9475 return ret;
9476}
9477
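/*
 * trace_parse_run_command() above backs line-oriented control files
 * such as the dynamic kprobe interface in trace_kprobe.c; a hedged
 * shell-level sketch (the probe syntax belongs to the callers, not to
 * this file):
 *
 *	echo 'p:myprobe do_sys_open' > /sys/kernel/tracing/kprobe_events
 *	echo '-:myprobe' > /sys/kernel/tracing/kprobe_events
 */
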
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009478__init static int tracer_alloc_buffers(void)
9479{
Steven Rostedt73c51622009-03-11 13:42:01 -04009480 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309481 int ret = -ENOMEM;
9482
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009483
9484 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009485 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009486 return -EPERM;
9487 }
9488
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009489 /*
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009490 * Make sure we don't accidentally add more trace options
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009491 * than we have bits for.
9492 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009493 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009494
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309495 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9496 goto out;
9497
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009498 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309499 goto out_free_buffer_mask;
9500
Steven Rostedt07d777f2011-09-22 14:01:55 -04009501 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009502 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009503 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009504 trace_printk_init_buffers();
9505
Steven Rostedt73c51622009-03-11 13:42:01 -04009506 /* To save memory, keep the ring buffer size to its minimum */
9507 if (ring_buffer_expanded)
9508 ring_buf_size = trace_buf_size;
9509 else
9510 ring_buf_size = 1;
9511
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309512 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009513 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009514
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009515 raw_spin_lock_init(&global_trace.start_lock);
9516
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009517 /*
9518 * The prepare callbacks allocates some memory for the ring buffer. We
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009519 * don't free the buffer if the CPU goes down. If we were to free
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009520 * the buffer, then the user would lose any trace that was in the
9521 * buffer. The memory will be removed once the "instance" is removed.
9522 */
9523 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9524					"trace/RB:prepare", trace_rb_cpu_prepare,
9525 NULL);
9526 if (ret < 0)
9527 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009528 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009529 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009530 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9531 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009532 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009533
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009534 if (trace_create_savedcmd() < 0)
9535 goto out_free_temp_buffer;
9536
Steven Rostedtab464282008-05-12 21:21:00 +02009537 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009538 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009539 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009540 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009541 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009542
Steven Rostedt499e5472012-02-22 15:50:28 -05009543 if (global_trace.buffer_disabled)
9544 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009545
Steven Rostedte1e232c2014-02-10 23:38:46 -05009546 if (trace_boot_clock) {
9547 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9548 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009549 pr_warn("Trace clock %s not defined, going back to default\n",
9550 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009551 }
9552
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009553 /*
9554 * register_tracer() might reference current_trace, so it
9555 * needs to be set before we register anything. This is
9556 * just a bootstrap of current_trace anyway.
9557 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009558 global_trace.current_trace = &nop_trace;
9559
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009560 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9561
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009562 ftrace_init_global_array_ops(&global_trace);
9563
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009564 init_trace_flags_index(&global_trace);
9565
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009566 register_tracer(&nop_trace);
9567
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009568 /* Function tracing may start here (via kernel command line) */
9569 init_function_trace();
9570
Steven Rostedt60a11772008-05-12 21:20:44 +02009571 /* All seems OK, enable tracing */
9572 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009573
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009574 atomic_notifier_chain_register(&panic_notifier_list,
9575 &trace_panic_notifier);
9576
9577 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009578
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009579 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9580
9581 INIT_LIST_HEAD(&global_trace.systems);
9582 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009583 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009584 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009585 list_add(&global_trace.list, &ftrace_trace_arrays);
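
	/*
	 * ftrace_trace_arrays now holds every trace array; the global
	 * one added here is the first entry, and instances created
	 * later through the tracefs "instances/" directory join the
	 * same list.
	 */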

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
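
/*
 * Note on the unwind labels above: they undo the setup steps in
 * reverse order, so a failure at any point frees only what had
 * already been allocated by the time the goto was taken.
 */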

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
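
/*
 * tracepoint_printk is set by the "tp_printk" boot option; with the
 * static key enabled above, trace events are echoed to printk (and
 * thus the console) as they fire.
 */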

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The buffer holding the default boot-up tracer name is in an
	 * init section, which is freed after boot. This function is
	 * called as a late_initcall: if the boot tracer was never
	 * registered, clear the pointer here so that a later
	 * registration cannot touch memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);
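
/*
 * Initcall ordering matters here: fs_initcall() runs at level 5 and
 * late_initcall_sync() in the final late (level 7) pass, so the
 * tracefs files are created well before the unregistered boot tracer
 * is cleared.
 */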

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return -EPERM;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif
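
/*
 * The default picked above can still be changed from userspace at run
 * time, e.g.:
 *
 *	# echo local > /sys/kernel/tracing/trace_clock
 *	# cat /sys/kernel/tracing/trace_clock
 *
 * (assuming tracefs is mounted at /sys/kernel/tracing).
 */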