// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by adding
 * "ftrace_dump_on_oops" to the kernel command line, or by writing to
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump the buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
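
/*
 * Illustrative usage, following the two paths documented above (a
 * sketch; "orig_cpu" maps to DUMP_ORIG, see the __setup handler below):
 *
 *	ftrace_dump_on_oops			boot: dump all CPUs
 *	ftrace_dump_on_oops=orig_cpu		boot: dump only the oops CPU
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(run time, all CPUs)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	(run time, oops CPU)
 */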

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
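
/*
 * A minimal sketch of how an array laid out as described above can be
 * walked (illustrative only; the real traversal logic lives in the
 * eval-map update code later in this file):
 */
static inline void trace_eval_maps_walk_example(union trace_eval_map_item *v)
{
	while (v) {
		/* v points at the head item; head.length maps follow it */
		union trace_eval_map_item *p = v + 1 + v->head.length;

		/* p now points at the tail item; follow the chain */
		v = p->tail.next;
	}
}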
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can later be
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
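
/*
 * A minimal sketch of how a caller is expected to drive the helper
 * above from the fork and exit paths (hypothetical wrappers; the real
 * hooks live in the trace event code):
 */
static inline void example_on_fork(struct trace_pid_list *pid_list,
				   struct task_struct *parent,
				   struct task_struct *child)
{
	/* The child is added only if the parent is already listed */
	trace_filter_add_remove_task(pid_list, parent, child);
}

static inline void example_on_exit(struct trace_pid_list *pid_list,
				   struct task_struct *task)
{
	/* @self == NULL means "remove @task from the list" */
	trace_filter_add_remove_task(pid_list, NULL, task);
}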

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
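
/*
 * A minimal sketch of wiring the three helpers above into seq_file
 * operations (hypothetical; real users also take locks and use RCU to
 * fetch the pid list, and supply their own stop() callback). Note the
 * pid+1 encoding: pid 0 is never handed to seq_file as NULL.
 */
static inline void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;	/* set at open() */

	return trace_pid_start(pid_list, pos);
}

static inline void *example_pid_seq_next(struct seq_file *m, void *v,
					 loff_t *pos)
{
	return trace_pid_next(m->private, v, pos);
}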

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array when adding new pids by the user.
	 * The write is an all-or-nothing operation: if it fails, the
	 * current list is left unmodified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
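
/*
 * A minimal sketch of a write() handler built on trace_pid_write()
 * (hypothetical; real users also serialize against concurrent writers,
 * publish the new list so readers see it safely, and free the old one):
 */
static inline ssize_t example_pid_file_write(struct trace_pid_list **filtered,
					     const char __user *ubuf,
					     size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *new_list = NULL;
	ssize_t ret;

	ret = trace_pid_write(*filtered, &new_list, ubuf, cnt);
	if (ret < 0)
		return ret;		/* old list left untouched */

	*filtered = new_list;		/* all-or-nothing replacement */
	if (ret > 0)
		*ppos += ret;
	return ret;
}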

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it is much appreciated not to
 * have to wait for all that output. Anyway, this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
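
/*
 * A minimal sketch of the consumer-side discipline the primitives
 * above expect (illustrative only; real consumers such as the pipe
 * readers add waiting, iterators and error handling):
 */
static inline void example_consume_one_cpu(int cpu)
{
	trace_access_lock(cpu);		/* excludes RING_BUFFER_ALL_CPUS users */

	/* ... safely consume events from this cpu's ring buffer ... */

	trace_access_unlock(cpu);
}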

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
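
/*
 * Callers normally reach __trace_puts() through the trace_puts()
 * convenience macro, which captures the call site for @ip and the
 * string length for @size. An illustrative use from kernel code:
 *
 *	trace_puts("reached the slow path\n");
 */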

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string whose address is written to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot beforehand, either with
 * tracing_snapshot_alloc(), or manually with:
 *   echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, this will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
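
/*
 * A minimal sketch of the intended use from kernel code (illustrative;
 * error handling elided). Allocate once from a context that may sleep,
 * then snapshot wherever the interesting condition fires (any context
 * except NMI, which is rejected above):
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (something_went_wrong)
 *			tracing_snapshot();
 *	}
 */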

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
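
/*
 * A minimal sketch of a conditional-snapshot user (illustrative; the
 * callback and threshold are made up). The update() callback runs with
 * tr->max_lock held, so it must not call tracing_cond_snapshot_data():
 *
 *	static bool example_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *trigger = cond_data;
 *
 *		return *trigger > 100;	// snapshot only past the threshold
 *	}
 *
 *	// register once, then fire from the interesting code path;
 *	// update() receives the cond_data passed at trigger time:
 *	tracing_snapshot_cond_enable(tr, saved_data, example_update);
 *	...
 *	tracing_snapshot_cond(tr, &current_value);
 */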

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to check if its ring buffer is enabled
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show the state of the ring buffers enabled
 */
1315int tracing_is_on(void)
1316{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001317 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001318}
1319EXPORT_SYMBOL_GPL(tracing_is_on);
1320
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001321static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001322{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001323 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001324
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001325 if (!str)
1326 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001327 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001328 /* nr_entries cannot be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001329 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001330 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001331 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001332 return 1;
1333}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001334__setup("trace_buf_size=", set_buf_size);
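/*
 * Example (illustrative): because set_buf_size() runs the value through
 * memparse(), the boot parameter accepts the usual size suffixes. For
 * instance, either of these sets the per-CPU ring buffer size:
 *
 *	trace_buf_size=1048576
 *	trace_buf_size=1M
 */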
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001335
Tim Bird0e950172010-02-25 15:36:43 -08001336static int __init set_tracing_thresh(char *str)
1337{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001338 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001339 int ret;
1340
1341 if (!str)
1342 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001343 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001344 if (ret < 0)
1345 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001346 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001347 return 1;
1348}
1349__setup("tracing_thresh=", set_tracing_thresh);
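/*
 * Example (illustrative): the value is given in microseconds and stored
 * in nanoseconds (note the "* 1000" above), so booting with:
 *
 *	tracing_thresh=100
 *
 * makes the latency tracers record only latencies above 100 usecs.
 */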
1350
Steven Rostedt57f50be2008-05-12 21:20:44 +02001351unsigned long nsecs_to_usecs(unsigned long nsecs)
1352{
1353 return nsecs / 1000;
1354}
1355
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001356/*
1357 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001358 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001359 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001360 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001361 */
1362#undef C
1363#define C(a, b) b
1364
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001365/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001366static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001367 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001368 NULL
1369};
1370
Zhaolei5079f322009-08-25 16:12:56 +08001371static struct {
1372 u64 (*func)(void);
1373 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001374 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001375} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001376 { trace_clock_local, "local", 1 },
1377 { trace_clock_global, "global", 1 },
1378 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001379 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001380 { trace_clock, "perf", 1 },
1381 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001382 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001383 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001384 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001385};
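/*
 * Example (illustrative): the timestamp clock can be switched at run
 * time by writing one of the names from trace_clocks[] above into
 * tracefs; reading the file shows the current choice in brackets:
 *
 *	# echo global > /sys/kernel/tracing/trace_clock
 *	# cat /sys/kernel/tracing/trace_clock
 *	local [global] counter uptime perf mono mono_raw boot
 */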
1386
Tom Zanussi860f9f62018-01-15 20:51:48 -06001387bool trace_clock_in_ns(struct trace_array *tr)
1388{
1389 if (trace_clocks[tr->clock_id].in_ns)
1390 return true;
1391
1392 return false;
1393}
1394
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001395/*
1396 * trace_parser_get_init - allocates and initializes the trace parser buffer
1397 */
1398int trace_parser_get_init(struct trace_parser *parser, int size)
1399{
1400 memset(parser, 0, sizeof(*parser));
1401
1402 parser->buffer = kmalloc(size, GFP_KERNEL);
1403 if (!parser->buffer)
1404 return 1;
1405
1406 parser->size = size;
1407 return 0;
1408}
1409
1410/*
1411 * trace_parser_put - frees the buffer for trace parser
1412 */
1413void trace_parser_put(struct trace_parser *parser)
1414{
1415 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001416 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001417}
1418
1419/*
1420 * trace_get_user - reads the user input string separated by space
1421 * (matched by isspace(ch))
1422 *
1423 * For each string found the 'struct trace_parser' is updated,
1424 * and the function returns.
1425 *
1426 * Returns number of bytes read.
1427 *
1428 * See kernel/trace/trace.h for 'struct trace_parser' details.
1429 */
1430int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1431 size_t cnt, loff_t *ppos)
1432{
1433 char ch;
1434 size_t read = 0;
1435 ssize_t ret;
1436
1437 if (!*ppos)
1438 trace_parser_clear(parser);
1439
1440 ret = get_user(ch, ubuf++);
1441 if (ret)
1442 goto out;
1443
1444 read++;
1445 cnt--;
1446
1447 /*
1448 * If the parser has not finished with the last write,
1449 * continue reading the user input without skipping spaces.
1450 */
1451 if (!parser->cont) {
1452 /* skip white space */
1453 while (cnt && isspace(ch)) {
1454 ret = get_user(ch, ubuf++);
1455 if (ret)
1456 goto out;
1457 read++;
1458 cnt--;
1459 }
1460
Changbin Du76638d92018-01-16 17:02:29 +08001461 parser->idx = 0;
1462
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001463 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001464 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001465 *ppos += read;
1466 ret = read;
1467 goto out;
1468 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001469 }
1470
1471 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001472 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001473 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001474 parser->buffer[parser->idx++] = ch;
1475 else {
1476 ret = -EINVAL;
1477 goto out;
1478 }
1479 ret = get_user(ch, ubuf++);
1480 if (ret)
1481 goto out;
1482 read++;
1483 cnt--;
1484 }
1485
1486 /* We either finished the input or must wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001487 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001488 parser->buffer[parser->idx] = 0;
1489 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001490 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001491 parser->cont = true;
1492 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001493 /* Make sure the parsed string always terminates with '\0'. */
1494 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001495 } else {
1496 ret = -EINVAL;
1497 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001498 }
1499
1500 *ppos += read;
1501 ret = read;
1502
1503out:
1504 return ret;
1505}
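/*
 * Illustrative sketch of how a tracefs write handler typically drives
 * the parser above (MY_BUF_SIZE and my_handle_token() are hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t ret;
 *
 *	if (trace_parser_get_init(&parser, MY_BUF_SIZE))
 *		return -ENOMEM;
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		my_handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return ret;
 */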
1506
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001507/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001508static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001509{
1510 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001511
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001512 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001513 return -EBUSY;
1514
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001515 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001516 if (cnt > len)
1517 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001518 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001519
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001520 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001521 return cnt;
1522}
1523
Tim Bird0e950172010-02-25 15:36:43 -08001524unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001525static const struct file_operations tracing_max_lat_fops;
1526
1527#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1528 defined(CONFIG_FSNOTIFY)
1529
1530static struct workqueue_struct *fsnotify_wq;
1531
1532static void latency_fsnotify_workfn(struct work_struct *work)
1533{
1534 struct trace_array *tr = container_of(work, struct trace_array,
1535 fsnotify_work);
1536 fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
1537 tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
1538}
1539
1540static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1541{
1542 struct trace_array *tr = container_of(iwork, struct trace_array,
1543 fsnotify_irqwork);
1544 queue_work(fsnotify_wq, &tr->fsnotify_work);
1545}
1546
1547static void trace_create_maxlat_file(struct trace_array *tr,
1548 struct dentry *d_tracer)
1549{
1550 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1551 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1552 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1553 d_tracer, &tr->max_latency,
1554 &tracing_max_lat_fops);
1555}
1556
1557__init static int latency_fsnotify_init(void)
1558{
1559 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1560 WQ_UNBOUND | WQ_HIGHPRI, 0);
1561 if (!fsnotify_wq) {
1562 pr_err("Unable to allocate tr_max_lat_wq\n");
1563 return -ENOMEM;
1564 }
1565 return 0;
1566}
1567
1568late_initcall_sync(latency_fsnotify_init);
1569
1570void latency_fsnotify(struct trace_array *tr)
1571{
1572 if (!fsnotify_wq)
1573 return;
1574 /*
1575 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1576 * possible that we are called from __schedule() or do_idle(), which
1577 * could cause a deadlock.
1578 */
1579 irq_work_queue(&tr->fsnotify_irqwork);
1580}
1581
1582/*
1583 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1584 * defined(CONFIG_FSNOTIFY)
1585 */
1586#else
1587
1588#define trace_create_maxlat_file(tr, d_tracer) \
1589 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1590 &tr->max_latency, &tracing_max_lat_fops)
1591
1592#endif
Tim Bird0e950172010-02-25 15:36:43 -08001593
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001594#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001595/*
1596 * Copy the new maximum trace into the separate maximum-trace
1597 * structure. (This way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001598 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001599 */
1600static void
1601__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1602{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001603 struct array_buffer *trace_buf = &tr->array_buffer;
1604 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001605 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1606 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001607
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001608 max_buf->cpu = cpu;
1609 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001610
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001611 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001612 max_data->critical_start = data->critical_start;
1613 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001614
Tom Zanussi85f726a2019-03-05 10:12:00 -06001615 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001616 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001617 /*
1618 * If tsk == current, then use current_uid(), as that does not use
1619 * RCU. The irq tracer can be called out of RCU scope.
1620 */
1621 if (tsk == current)
1622 max_data->uid = current_uid();
1623 else
1624 max_data->uid = task_uid(tsk);
1625
Steven Rostedt8248ac02009-09-02 12:27:41 -04001626 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1627 max_data->policy = tsk->policy;
1628 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001629
1630 /* record this tasks comm */
1631 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001632 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001633}
1634
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001635/**
1636 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1637 * @tr: the trace array to update
1638 * @tsk: the task with the latency
1639 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001640 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001641 *
1642 * Flip the buffers between the @tr and the max_tr and record information
1643 * about which task was the cause of this latency.
1644 */
Ingo Molnare309b412008-05-12 21:20:51 +02001645void
Tom Zanussia35873a2019-02-13 17:42:45 -06001646update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1647 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001648{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001649 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001650 return;
1651
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001652 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001653
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001654 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001655 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001656 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001657 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001658 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001659
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001660 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001661
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001662 /* Inherit the recordable setting from array_buffer */
1663 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001664 ring_buffer_record_on(tr->max_buffer.buffer);
1665 else
1666 ring_buffer_record_off(tr->max_buffer.buffer);
1667
Tom Zanussia35873a2019-02-13 17:42:45 -06001668#ifdef CONFIG_TRACER_SNAPSHOT
1669 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1670 goto out_unlock;
1671#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001672 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001673
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001674 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001675
1676 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001677 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001678}
1679
1680/**
1681 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001682 * @tr: the trace array to update
1683 * @tsk: task with the latency
1684 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001685 *
1686 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001687 */
Ingo Molnare309b412008-05-12 21:20:51 +02001688void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001689update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1690{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001691 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001692
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001693 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001694 return;
1695
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001696 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001697 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001698 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001699 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001700 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001701 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001702
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001703 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001704
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001705 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001706
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001707 if (ret == -EBUSY) {
1708 /*
1709 * We failed to swap the buffer due to a commit taking
1710 * place on this CPU. We fail to record, but we reset
1711 * the max trace buffer (no one writes directly to it)
1712 * and flag that it failed.
1713 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001714 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001715 "Failed to swap buffers due to commit in progress\n");
1716 }
1717
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001718 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001719
1720 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001721 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001722}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001723#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001724
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001725static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001726{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001727 /* Iterators are static, they should be filled or empty */
1728 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001729 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001730
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001731 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001732 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001733}
1734
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001735#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001736static bool selftests_can_run;
1737
1738struct trace_selftests {
1739 struct list_head list;
1740 struct tracer *type;
1741};
1742
1743static LIST_HEAD(postponed_selftests);
1744
1745static int save_selftest(struct tracer *type)
1746{
1747 struct trace_selftests *selftest;
1748
1749 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1750 if (!selftest)
1751 return -ENOMEM;
1752
1753 selftest->type = type;
1754 list_add(&selftest->list, &postponed_selftests);
1755 return 0;
1756}
1757
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001758static int run_tracer_selftest(struct tracer *type)
1759{
1760 struct trace_array *tr = &global_trace;
1761 struct tracer *saved_tracer = tr->current_trace;
1762 int ret;
1763
1764 if (!type->selftest || tracing_selftest_disabled)
1765 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001766
1767 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001768 * If a tracer registers early in boot up (before scheduling is
1769 * initialized and such), then do not run its selftests yet.
1770 * Instead, run it a little later in the boot process.
1771 */
1772 if (!selftests_can_run)
1773 return save_selftest(type);
1774
1775 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001776 * Run a selftest on this tracer.
1777 * Here we reset the trace buffer, and set the current
1778 * tracer to be this tracer. The tracer can then run some
1779 * internal tracing to verify that everything is in order.
1780 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001781 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001782 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001783
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001784 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001785
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001786#ifdef CONFIG_TRACER_MAX_TRACE
1787 if (type->use_max_tr) {
1788 /* If we expanded the buffers, make sure the max is expanded too */
1789 if (ring_buffer_expanded)
1790 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1791 RING_BUFFER_ALL_CPUS);
1792 tr->allocated_snapshot = true;
1793 }
1794#endif
1795
1796 /* the test is responsible for initializing and enabling */
1797 pr_info("Testing tracer %s: ", type->name);
1798 ret = type->selftest(type, tr);
1799 /* the test is responsible for resetting too */
1800 tr->current_trace = saved_tracer;
1801 if (ret) {
1802 printk(KERN_CONT "FAILED!\n");
1803 /* Add the warning after printing 'FAILED' */
1804 WARN_ON(1);
1805 return -1;
1806 }
1807 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001808 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001809
1810#ifdef CONFIG_TRACER_MAX_TRACE
1811 if (type->use_max_tr) {
1812 tr->allocated_snapshot = false;
1813
1814 /* Shrink the max buffer again */
1815 if (ring_buffer_expanded)
1816 ring_buffer_resize(tr->max_buffer.buffer, 1,
1817 RING_BUFFER_ALL_CPUS);
1818 }
1819#endif
1820
1821 printk(KERN_CONT "PASSED\n");
1822 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001823}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001824
1825static __init int init_trace_selftests(void)
1826{
1827 struct trace_selftests *p, *n;
1828 struct tracer *t, **last;
1829 int ret;
1830
1831 selftests_can_run = true;
1832
1833 mutex_lock(&trace_types_lock);
1834
1835 if (list_empty(&postponed_selftests))
1836 goto out;
1837
1838 pr_info("Running postponed tracer tests:\n");
1839
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001840 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001841 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001842 /* This loop can take minutes when sanitizers are enabled, so
1843 * let's make sure we allow RCU processing.
1844 */
1845 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001846 ret = run_tracer_selftest(p->type);
1847 /* If the test fails, then warn and remove from available_tracers */
1848 if (ret < 0) {
1849 WARN(1, "tracer: %s failed selftest, disabling\n",
1850 p->type->name);
1851 last = &trace_types;
1852 for (t = trace_types; t; t = t->next) {
1853 if (t == p->type) {
1854 *last = t->next;
1855 break;
1856 }
1857 last = &t->next;
1858 }
1859 }
1860 list_del(&p->list);
1861 kfree(p);
1862 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001863 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001864
1865 out:
1866 mutex_unlock(&trace_types_lock);
1867
1868 return 0;
1869}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001870core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001871#else
1872static inline int run_tracer_selftest(struct tracer *type)
1873{
1874 return 0;
1875}
1876#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001877
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001878static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1879
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001880static void __init apply_trace_boot_options(void);
1881
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001882/**
1883 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001884 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001885 *
1886 * Register a new plugin tracer.
1887 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001888int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001889{
1890 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001891 int ret = 0;
1892
1893 if (!type->name) {
1894 pr_info("Tracer must have a name\n");
1895 return -1;
1896 }
1897
Dan Carpenter24a461d2010-07-10 12:06:44 +02001898 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001899 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1900 return -1;
1901 }
1902
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001903 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11001904 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05001905 type->name);
1906 return -EPERM;
1907 }
1908
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001909 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001910
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001911 tracing_selftest_running = true;
1912
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001913 for (t = trace_types; t; t = t->next) {
1914 if (strcmp(type->name, t->name) == 0) {
1915 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001916 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001917 type->name);
1918 ret = -1;
1919 goto out;
1920 }
1921 }
1922
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001923 if (!type->set_flag)
1924 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001925 if (!type->flags) {
1926 /*allocate a dummy tracer_flags*/
1927 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001928 if (!type->flags) {
1929 ret = -ENOMEM;
1930 goto out;
1931 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001932 type->flags->val = 0;
1933 type->flags->opts = dummy_tracer_opt;
1934 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001935 if (!type->flags->opts)
1936 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001937
Chunyu Hud39cdd22016-03-08 21:37:01 +08001938 /* store the tracer for __set_tracer_option */
1939 type->flags->trace = type;
1940
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001941 ret = run_tracer_selftest(type);
1942 if (ret < 0)
1943 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001944
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001945 type->next = trace_types;
1946 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001947 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001948
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001949 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001950 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951 mutex_unlock(&trace_types_lock);
1952
Steven Rostedtdac74942009-02-05 01:13:38 -05001953 if (ret || !default_bootup_tracer)
1954 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001955
Li Zefanee6c2c12009-09-18 14:06:47 +08001956 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001957 goto out_unlock;
1958
1959 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1960 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001961 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001962 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001963
1964 apply_trace_boot_options();
1965
Steven Rostedtdac74942009-02-05 01:13:38 -05001966 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001967 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001968#ifdef CONFIG_FTRACE_STARTUP_TEST
1969 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1970 type->name);
1971#endif
1972
1973 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001974 return ret;
1975}
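/*
 * Minimal sketch of a tracer registration (illustrative; the "mini"
 * tracer is hypothetical). Note that register_tracer() is __init, so
 * only built-in code can call it:
 *
 *	static int mini_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer mini_tracer __read_mostly = {
 *		.name	= "mini",
 *		.init	= mini_init,
 *	};
 *
 *	static int __init mini_register(void)
 *	{
 *		return register_tracer(&mini_tracer);
 *	}
 *	core_initcall(mini_register);
 */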
1976
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001977static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001978{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05001979 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001980
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001981 if (!buffer)
1982 return;
1983
Steven Rostedtf6339032009-09-04 12:35:16 -04001984 ring_buffer_record_disable(buffer);
1985
1986 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001987 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001988 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001989
1990 ring_buffer_record_enable(buffer);
1991}
1992
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001993void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001994{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05001995 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001996 int cpu;
1997
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001998 if (!buffer)
1999 return;
2000
Steven Rostedt621968c2009-09-04 12:02:35 -04002001 ring_buffer_record_disable(buffer);
2002
2003 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002004 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002005
Alexander Z Lam94571582013-08-02 18:36:16 -07002006 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002007
2008 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04002009 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04002010
2011 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002012}
2013
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002014/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002015void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002016{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002017 struct trace_array *tr;
2018
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002019 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002020 if (!tr->clear_trace)
2021 continue;
2022 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002023 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002024#ifdef CONFIG_TRACER_MAX_TRACE
2025 tracing_reset_online_cpus(&tr->max_buffer);
2026#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002027 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002028}
2029
Joel Fernandesd914ba32017-06-26 19:01:55 -07002030static int *tgid_map;
2031
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002032#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002033#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002034static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002035struct saved_cmdlines_buffer {
2036 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2037 unsigned *map_cmdline_to_pid;
2038 unsigned cmdline_num;
2039 int cmdline_idx;
2040 char *saved_cmdlines;
2041};
2042static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002043
Steven Rostedt25b0b442008-05-12 21:21:00 +02002044/* temporarily disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002045static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002046
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002047static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002048{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002049 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2050}
2051
2052static inline void set_cmdline(int idx, const char *cmdline)
2053{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002054 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002055}
2056
2057static int allocate_cmdlines_buffer(unsigned int val,
2058 struct saved_cmdlines_buffer *s)
2059{
Kees Cook6da2ec52018-06-12 13:55:00 -07002060 s->map_cmdline_to_pid = kmalloc_array(val,
2061 sizeof(*s->map_cmdline_to_pid),
2062 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002063 if (!s->map_cmdline_to_pid)
2064 return -ENOMEM;
2065
Kees Cook6da2ec52018-06-12 13:55:00 -07002066 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002067 if (!s->saved_cmdlines) {
2068 kfree(s->map_cmdline_to_pid);
2069 return -ENOMEM;
2070 }
2071
2072 s->cmdline_idx = 0;
2073 s->cmdline_num = val;
2074 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2075 sizeof(s->map_pid_to_cmdline));
2076 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2077 val * sizeof(*s->map_cmdline_to_pid));
2078
2079 return 0;
2080}
2081
2082static int trace_create_savedcmd(void)
2083{
2084 int ret;
2085
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002086 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002087 if (!savedcmd)
2088 return -ENOMEM;
2089
2090 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2091 if (ret < 0) {
2092 kfree(savedcmd);
2093 savedcmd = NULL;
2094 return -ENOMEM;
2095 }
2096
2097 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002098}
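/*
 * Note (illustrative): SAVED_CMDLINES_DEFAULT is only the initial
 * capacity; the buffer allocated above can be resized at run time
 * through tracefs, e.g.:
 *
 *	# echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 */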
2099
Carsten Emdeb5130b12009-09-13 01:43:07 +02002100int is_tracing_stopped(void)
2101{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002102 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002103}
2104
Steven Rostedt0f048702008-11-05 16:05:44 -05002105/**
2106 * tracing_start - quick start of the tracer
2107 *
2108 * If tracing is enabled but was stopped by tracing_stop,
2109 * this will start the tracer back up.
2110 */
2111void tracing_start(void)
2112{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002113 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002114 unsigned long flags;
2115
2116 if (tracing_disabled)
2117 return;
2118
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002119 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2120 if (--global_trace.stop_count) {
2121 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002122 /* Someone screwed up their debugging */
2123 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002124 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002125 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002126 goto out;
2127 }
2128
Steven Rostedta2f80712010-03-12 19:56:00 -05002129 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002130 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002131
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002132 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002133 if (buffer)
2134 ring_buffer_record_enable(buffer);
2135
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002136#ifdef CONFIG_TRACER_MAX_TRACE
2137 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002138 if (buffer)
2139 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002140#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002141
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002142 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002143
Steven Rostedt0f048702008-11-05 16:05:44 -05002144 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002145 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2146}
2147
2148static void tracing_start_tr(struct trace_array *tr)
2149{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002150 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002151 unsigned long flags;
2152
2153 if (tracing_disabled)
2154 return;
2155
2156 /* If global, we need to also start the max tracer */
2157 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2158 return tracing_start();
2159
2160 raw_spin_lock_irqsave(&tr->start_lock, flags);
2161
2162 if (--tr->stop_count) {
2163 if (tr->stop_count < 0) {
2164 /* Someone screwed up their debugging */
2165 WARN_ON_ONCE(1);
2166 tr->stop_count = 0;
2167 }
2168 goto out;
2169 }
2170
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002171 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002172 if (buffer)
2173 ring_buffer_record_enable(buffer);
2174
2175 out:
2176 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002177}
2178
2179/**
2180 * tracing_stop - quick stop of the tracer
2181 *
2182 * Lightweight way to stop tracing. Use in conjunction with
2183 * tracing_start.
2184 */
2185void tracing_stop(void)
2186{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002187 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002188 unsigned long flags;
2189
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002190 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2191 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002192 goto out;
2193
Steven Rostedta2f80712010-03-12 19:56:00 -05002194 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002195 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002196
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002197 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002198 if (buffer)
2199 ring_buffer_record_disable(buffer);
2200
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002201#ifdef CONFIG_TRACER_MAX_TRACE
2202 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002203 if (buffer)
2204 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002205#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002206
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002207 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002208
Steven Rostedt0f048702008-11-05 16:05:44 -05002209 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002210 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2211}
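/*
 * Illustrative sketch: as the kernel-doc above says, tracing_stop()
 * pairs with tracing_start() to bracket a region whose events should
 * not be recorded; stop_count makes the pair nest safely:
 *
 *	tracing_stop();
 *	my_noisy_operation();	// hypothetical helper
 *	tracing_start();
 */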
2212
2213static void tracing_stop_tr(struct trace_array *tr)
2214{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002215 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002216 unsigned long flags;
2217
2218 /* If global, we need to also stop the max tracer */
2219 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2220 return tracing_stop();
2221
2222 raw_spin_lock_irqsave(&tr->start_lock, flags);
2223 if (tr->stop_count++)
2224 goto out;
2225
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002226 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002227 if (buffer)
2228 ring_buffer_record_disable(buffer);
2229
2230 out:
2231 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002232}
2233
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002234static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002235{
Carsten Emdea635cf02009-03-18 09:00:41 +01002236 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002237
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002238 /* treat recording of idle task as a success */
2239 if (!tsk->pid)
2240 return 1;
2241
2242 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002243 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244
2245 /*
2246 * It's not the end of the world if we don't get
2247 * the lock, but we also don't want to spin
2248 * nor do we want to disable interrupts,
2249 * so if we miss here, then better luck next time.
2250 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002251 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002252 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002253
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002254 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002255 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002256 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002257
Carsten Emdea635cf02009-03-18 09:00:41 +01002258 /*
2259 * Check whether the cmdline buffer at idx has a pid
2260 * mapped. We are going to overwrite that entry so we
2261 * need to clear the map_pid_to_cmdline. Otherwise we
2262 * would read the new comm for the old pid.
2263 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002264 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002265 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002266 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002267
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002268 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2269 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002270
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002271 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272 }
2273
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002274 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002275
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002276 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002277
2278 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002279}
2280
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002281static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002282{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002283 unsigned map;
2284
Steven Rostedt4ca530852009-03-16 19:20:15 -04002285 if (!pid) {
2286 strcpy(comm, "<idle>");
2287 return;
2288 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002289
Steven Rostedt74bf4072010-01-25 15:11:53 -05002290 if (WARN_ON_ONCE(pid < 0)) {
2291 strcpy(comm, "<XXX>");
2292 return;
2293 }
2294
Steven Rostedt4ca530852009-03-16 19:20:15 -04002295 if (pid > PID_MAX_DEFAULT) {
2296 strcpy(comm, "<...>");
2297 return;
2298 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002300 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002301 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302302 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002303 else
2304 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002305}
2306
2307void trace_find_cmdline(int pid, char comm[])
2308{
2309 preempt_disable();
2310 arch_spin_lock(&trace_cmdline_lock);
2311
2312 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002314 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002315 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002316}
2317
Joel Fernandesd914ba32017-06-26 19:01:55 -07002318int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002319{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002320 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2321 return 0;
2322
2323 return tgid_map[pid];
2324}
2325
2326static int trace_save_tgid(struct task_struct *tsk)
2327{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002328 /* treat recording of idle task as a success */
2329 if (!tsk->pid)
2330 return 1;
2331
2332 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002333 return 0;
2334
2335 tgid_map[tsk->pid] = tsk->tgid;
2336 return 1;
2337}
2338
2339static bool tracing_record_taskinfo_skip(int flags)
2340{
2341 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2342 return true;
2343 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2344 return true;
2345 if (!__this_cpu_read(trace_taskinfo_save))
2346 return true;
2347 return false;
2348}
2349
2350/**
2351 * tracing_record_taskinfo - record the task info of a task
2352 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002353 * @task: task to record
2354 * @flags: TRACE_RECORD_CMDLINE for recording comm
2355 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002356 */
2357void tracing_record_taskinfo(struct task_struct *task, int flags)
2358{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002359 bool done;
2360
Joel Fernandesd914ba32017-06-26 19:01:55 -07002361 if (tracing_record_taskinfo_skip(flags))
2362 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002363
2364 /*
2365 * Record as much task information as possible. If some fail, continue
2366 * to try to record the others.
2367 */
2368 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2369 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2370
2371 /* If recording any information failed, retry again soon. */
2372 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002373 return;
2374
Joel Fernandesd914ba32017-06-26 19:01:55 -07002375 __this_cpu_write(trace_taskinfo_save, false);
2376}
2377
2378/**
2379 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2380 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002381 * @prev: previous task during sched_switch
2382 * @next: next task during sched_switch
2383 * @flags: TRACE_RECORD_CMDLINE for recording comm
2384 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002385 */
2386void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2387 struct task_struct *next, int flags)
2388{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002389 bool done;
2390
Joel Fernandesd914ba32017-06-26 19:01:55 -07002391 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002392 return;
2393
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002394 /*
2395 * Record as much task information as possible. If some fail, continue
2396 * to try to record the others.
2397 */
2398 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2399 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2400 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2401 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002402
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002403 /* If recording any information failed, retry again soon. */
2404 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002405 return;
2406
2407 __this_cpu_write(trace_taskinfo_save, false);
2408}
2409
2410/* Helpers to record a specific task information */
2411void tracing_record_cmdline(struct task_struct *task)
2412{
2413 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2414}
2415
2416void tracing_record_tgid(struct task_struct *task)
2417{
2418 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002419}
2420
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002421/*
2422 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2423 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2424 * simplifies those functions and keeps them in sync.
2425 */
2426enum print_line_t trace_handle_return(struct trace_seq *s)
2427{
2428 return trace_seq_has_overflowed(s) ?
2429 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2430}
2431EXPORT_SYMBOL_GPL(trace_handle_return);
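/*
 * Illustrative sketch of the pattern trace_handle_return() supports in
 * an event's print function (my_event_print() is hypothetical):
 *
 *	static enum print_line_t
 *	my_event_print(struct trace_iterator *iter, int flags,
 *		       struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my_event fired\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */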
2432
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002433void
Cong Wang46710f32019-05-25 09:57:59 -07002434tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2435 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002436{
2437 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002438
Steven Rostedt777e2082008-09-29 23:02:42 -04002439 entry->preempt_count = pc & 0xff;
2440 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002441 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002442 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002443#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002444 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002445#else
2446 TRACE_FLAG_IRQS_NOSUPPORT |
2447#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002448 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002449 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302450 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002451 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2452 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002454EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455
Steven Rostedte77405a2009-09-02 14:17:06 -04002456struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002457trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002458 int type,
2459 unsigned long len,
2460 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002461{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002462 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002463}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002464
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002465DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2466DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2467static int trace_buffered_event_ref;
2468
2469/**
2470 * trace_buffered_event_enable - enable buffering events
2471 *
2472 * When events are being filtered, it is quicker to use a temporary
2473 * buffer to write the event data into if there's a likely chance
2474 * that it will not be committed. Discarding an event from the ring
2475 * buffer is not as fast as committing, and is much slower than
2476 * copying the data and committing it in one shot.
2477 *
2478 * When an event is to be filtered, allocate per-cpu buffers to
2479 * write the event data into; if the event is filtered and discarded
2480 * it is simply dropped, otherwise the entire data is committed
2481 * in one shot.
2482 */
2483void trace_buffered_event_enable(void)
2484{
2485 struct ring_buffer_event *event;
2486 struct page *page;
2487 int cpu;
2488
2489 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2490
2491 if (trace_buffered_event_ref++)
2492 return;
2493
2494 for_each_tracing_cpu(cpu) {
2495 page = alloc_pages_node(cpu_to_node(cpu),
2496 GFP_KERNEL | __GFP_NORETRY, 0);
2497 if (!page)
2498 goto failed;
2499
2500 event = page_address(page);
2501 memset(event, 0, sizeof(*event));
2502
2503 per_cpu(trace_buffered_event, cpu) = event;
2504
2505 preempt_disable();
2506 if (cpu == smp_processor_id() &&
2507 this_cpu_read(trace_buffered_event) !=
2508 per_cpu(trace_buffered_event, cpu))
2509 WARN_ON_ONCE(1);
2510 preempt_enable();
2511 }
2512
2513 return;
2514 failed:
2515 trace_buffered_event_disable();
2516}
2517
2518static void enable_trace_buffered_event(void *data)
2519{
2520 /* Probably not needed, but do it anyway */
2521 smp_rmb();
2522 this_cpu_dec(trace_buffered_event_cnt);
2523}
2524
2525static void disable_trace_buffered_event(void *data)
2526{
2527 this_cpu_inc(trace_buffered_event_cnt);
2528}
2529
2530/**
2531 * trace_buffered_event_disable - disable buffering events
2532 *
2533 * When a filter is removed, it is faster to not use the buffered
2534 * events, and to commit directly into the ring buffer. Free up
2535 * the temp buffers when there are no more users. This requires
2536 * special synchronization with current events.
2537 */
2538void trace_buffered_event_disable(void)
2539{
2540 int cpu;
2541
2542 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2543
2544 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2545 return;
2546
2547 if (--trace_buffered_event_ref)
2548 return;
2549
2550 preempt_disable();
2551 /* For each CPU, set the buffer as used. */
2552 smp_call_function_many(tracing_buffer_mask,
2553 disable_trace_buffered_event, NULL, 1);
2554 preempt_enable();
2555
2556 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002557 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002558
2559 for_each_tracing_cpu(cpu) {
2560 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2561 per_cpu(trace_buffered_event, cpu) = NULL;
2562 }
2563 /*
2564 * Make sure trace_buffered_event is NULL before clearing
2565 * trace_buffered_event_cnt.
2566 */
2567 smp_wmb();
2568
2569 preempt_disable();
2570 /* Do the work on each cpu */
2571 smp_call_function_many(tracing_buffer_mask,
2572 enable_trace_buffered_event, NULL, 1);
2573 preempt_enable();
2574}
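/*
 * Illustrative sketch: both functions above insist on event_mutex being
 * held (note the WARN_ON_ONCE checks), so a caller installing a filter
 * would do something like (my_install_filter() is hypothetical):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	my_install_filter(file);
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable(), again under
 * event_mutex, when the filter is removed.
 */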
2575
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002576static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002577
Steven Rostedtef5580d2009-02-27 19:38:04 -05002578struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002579trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002580 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002581 int type, unsigned long len,
2582 unsigned long flags, int pc)
2583{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002584 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002585 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002586
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002587 *current_rb = trace_file->tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002588
Tom Zanussi00b41452018-01-15 20:51:39 -06002589 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002590 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2591 (entry = this_cpu_read(trace_buffered_event))) {
2592 /* Try to use the per cpu buffer first */
2593 val = this_cpu_inc_return(trace_buffered_event_cnt);
2594 if (val == 1) {
2595 trace_event_setup(entry, type, flags, pc);
2596 entry->array[0] = len;
2597 return entry;
2598 }
2599 this_cpu_dec(trace_buffered_event_cnt);
2600 }
2601
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002602 entry = __trace_buffer_lock_reserve(*current_rb,
2603 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002604 /*
2605 * If tracing is off, but we have triggers enabled
2606 * we still need to look at the event data. Use the temp_buffer
2607 * to store the trace event for the trigger to use. It's recursion
2608 * safe and will not be recorded anywhere.
2609 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002610 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002611 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002612 entry = __trace_buffer_lock_reserve(*current_rb,
2613 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002614 }
2615 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002616}
2617EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
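/*
 * Illustrative sketch, not part of the kernel source: the reserve/
 * fill/commit sequence that generated trace_event_raw_event_*()
 * callers follow around the function above. The fbuffer fields match
 * struct trace_event_buffer as used elsewhere in this file; the
 * event-specific entry layout is a placeholder.
 */
#if 0	/* example only */
	fbuffer->event = trace_event_buffer_lock_reserve(&fbuffer->buffer,
						trace_file, event_type,
						sizeof(*entry), flags, pc);
	if (fbuffer->event) {
		entry = ring_buffer_event_data(fbuffer->event);
		/* ... fill in the event-specific fields of *entry ... */
		trace_event_buffer_commit(fbuffer);
	}
#endif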
2618
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002619static DEFINE_SPINLOCK(tracepoint_iter_lock);
2620static DEFINE_MUTEX(tracepoint_printk_mutex);
2621
2622static void output_printk(struct trace_event_buffer *fbuffer)
2623{
2624 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002625 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002626 struct trace_event *event;
2627 unsigned long flags;
2628 struct trace_iterator *iter = tracepoint_print_iter;
2629
2630 /* We should never get here if iter is NULL */
2631 if (WARN_ON_ONCE(!iter))
2632 return;
2633
2634 event_call = fbuffer->trace_file->event_call;
2635 if (!event_call || !event_call->event.funcs ||
2636 !event_call->event.funcs->trace)
2637 return;
2638
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002639 file = fbuffer->trace_file;
2640 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2641 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2642 !filter_match_preds(file->filter, fbuffer->entry)))
2643 return;
2644
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002645 event = &fbuffer->trace_file->event_call->event;
2646
2647 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2648 trace_seq_init(&iter->seq);
2649 iter->ent = fbuffer->entry;
2650 event_call->event.funcs->trace(iter, 0, event);
2651 trace_seq_putc(&iter->seq, 0);
2652 printk("%s", iter->seq.buffer);
2653
2654 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2655}
2656
2657int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2658 void __user *buffer, size_t *lenp,
2659 loff_t *ppos)
2660{
2661 int save_tracepoint_printk;
2662 int ret;
2663
2664 mutex_lock(&tracepoint_printk_mutex);
2665 save_tracepoint_printk = tracepoint_printk;
2666
2667 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2668
2669 /*
2670 * This will force exiting early, as tracepoint_printk
2671	 * is always zero when tracepoint_print_iter is not allocated
2672 */
2673 if (!tracepoint_print_iter)
2674 tracepoint_printk = 0;
2675
2676 if (save_tracepoint_printk == tracepoint_printk)
2677 goto out;
2678
2679 if (tracepoint_printk)
2680 static_key_enable(&tracepoint_printk_key.key);
2681 else
2682 static_key_disable(&tracepoint_printk_key.key);
2683
2684 out:
2685 mutex_unlock(&tracepoint_printk_mutex);
2686
2687 return ret;
2688}
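/*
 * Illustrative note, not part of the kernel source: this handler backs
 * the kernel.tracepoint_printk sysctl, so the static key is flipped
 * from user space with, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *
 * after which trace_event_buffer_commit() below also printk()s each
 * event via output_printk().
 */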
2689
2690void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2691{
2692 if (static_key_false(&tracepoint_printk_key.key))
2693 output_printk(fbuffer);
2694
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002695 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002696 fbuffer->event, fbuffer->entry,
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002697 fbuffer->flags, fbuffer->pc, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002698}
2699EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2700
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002701/*
2702 * Skip 3:
2703 *
2704 * trace_buffer_unlock_commit_regs()
2705 * trace_event_buffer_commit()
2706 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302707 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002708# define STACK_SKIP 3
2709
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002710void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002711 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002712 struct ring_buffer_event *event,
2713 unsigned long flags, int pc,
2714 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002715{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002716 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002717
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002718 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002719 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002720 * Note, we can still get here via blktrace, wakeup tracer
2721 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002722 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002723 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002724 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002725 ftrace_trace_userstack(buffer, flags, pc);
2726}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002727
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002728/*
2729 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2730 */
2731void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002732trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002733 struct ring_buffer_event *event)
2734{
2735 __buffer_unlock_commit(buffer, event);
2736}
2737
Chunyan Zhang478409d2016-11-21 15:57:18 +08002738static void
2739trace_process_export(struct trace_export *export,
2740 struct ring_buffer_event *event)
2741{
2742 struct trace_entry *entry;
2743 unsigned int size = 0;
2744
2745 entry = ring_buffer_event_data(event);
2746 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002747 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002748}
2749
2750static DEFINE_MUTEX(ftrace_export_lock);
2751
2752static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2753
2754static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2755
2756static inline void ftrace_exports_enable(void)
2757{
2758 static_branch_enable(&ftrace_exports_enabled);
2759}
2760
2761static inline void ftrace_exports_disable(void)
2762{
2763 static_branch_disable(&ftrace_exports_enabled);
2764}
2765
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002766static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002767{
2768 struct trace_export *export;
2769
2770 preempt_disable_notrace();
2771
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002772 export = rcu_dereference_raw_check(ftrace_exports_list);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002773 while (export) {
2774 trace_process_export(export, event);
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002775 export = rcu_dereference_raw_check(export->next);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002776 }
2777
2778 preempt_enable_notrace();
2779}
2780
2781static inline void
2782add_trace_export(struct trace_export **list, struct trace_export *export)
2783{
2784 rcu_assign_pointer(export->next, *list);
2785 /*
2786 * We are entering export into the list but another
2787 * CPU might be walking that list. We need to make sure
2788 * the export->next pointer is valid before another CPU sees
2789	 * the export pointer inserted into the list.
2790 */
2791 rcu_assign_pointer(*list, export);
2792}
2793
2794static inline int
2795rm_trace_export(struct trace_export **list, struct trace_export *export)
2796{
2797 struct trace_export **p;
2798
2799 for (p = list; *p != NULL; p = &(*p)->next)
2800 if (*p == export)
2801 break;
2802
2803 if (*p != export)
2804 return -1;
2805
2806 rcu_assign_pointer(*p, (*p)->next);
2807
2808 return 0;
2809}
2810
2811static inline void
2812add_ftrace_export(struct trace_export **list, struct trace_export *export)
2813{
2814 if (*list == NULL)
2815 ftrace_exports_enable();
2816
2817 add_trace_export(list, export);
2818}
2819
2820static inline int
2821rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2822{
2823 int ret;
2824
2825 ret = rm_trace_export(list, export);
2826 if (*list == NULL)
2827 ftrace_exports_disable();
2828
2829 return ret;
2830}
2831
2832int register_ftrace_export(struct trace_export *export)
2833{
2834 if (WARN_ON_ONCE(!export->write))
2835 return -1;
2836
2837 mutex_lock(&ftrace_export_lock);
2838
2839 add_ftrace_export(&ftrace_exports_list, export);
2840
2841 mutex_unlock(&ftrace_export_lock);
2842
2843 return 0;
2844}
2845EXPORT_SYMBOL_GPL(register_ftrace_export);
2846
2847int unregister_ftrace_export(struct trace_export *export)
2848{
2849 int ret;
2850
2851 mutex_lock(&ftrace_export_lock);
2852
2853 ret = rm_ftrace_export(&ftrace_exports_list, export);
2854
2855 mutex_unlock(&ftrace_export_lock);
2856
2857 return ret;
2858}
2859EXPORT_SYMBOL_GPL(unregister_ftrace_export);
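/*
 * Illustrative sketch, not part of the kernel source: a minimal
 * consumer of the export interface above. The callback signature
 * mirrors the export->write(export, entry, size) call made from
 * trace_process_export(); what the callback does with the raw entry
 * bytes (here just a pr_info()) is an assumption.
 */
#if 0	/* example only */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	pr_info("exported trace entry, %u bytes\n", size);
}

static struct trace_export my_export = {
	.write	= my_export_write,
};

/* register_ftrace_export(&my_export) publishes it on the RCU list;
 * unregister_ftrace_export(&my_export) takes it back off. */
#endif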
2860
Ingo Molnare309b412008-05-12 21:20:51 +02002861void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002862trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002863 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2864 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002865{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002866 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002867 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002868 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002869 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002870
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002871 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2872 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002873 if (!event)
2874 return;
2875 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002876 entry->ip = ip;
2877 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002878
Chunyan Zhang478409d2016-11-21 15:57:18 +08002879 if (!call_filter_check_discard(call, entry, buffer, event)) {
2880 if (static_branch_unlikely(&ftrace_exports_enabled))
2881 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002882 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002883 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002884}
2885
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002886#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002887
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002888/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2889#define FTRACE_KSTACK_NESTING 4
2890
2891#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2892
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002893struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002894 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002895};
2896
2898struct ftrace_stacks {
2899 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2900};
2901
2902static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002903static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2904
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002905static void __ftrace_trace_stack(struct trace_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002906 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002907 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002908{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002909 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002910 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002911 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002912 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002913 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002914 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002915
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002916 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002917	 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002918 * If regs is set, then these functions will not be in the way.
2919 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002920#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002921 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002922 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002923#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002924
2925 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002926 * Since events can happen in NMIs there's no safe way to
2927 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2928 * or NMI comes in, it will just have to use the default
2929 * FTRACE_STACK_SIZE.
2930 */
2931 preempt_disable_notrace();
2932
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002933 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2934
2935 /* This should never happen. If it does, yell once and skip */
2936	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2937 goto out;
2938
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002939 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002940 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2941 * interrupt will either see the value pre increment or post
2942 * increment. If the interrupt happens pre increment it will have
2943 * restored the counter when it returns. We just need a barrier to
2944 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002945 */
2946 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002947
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002948 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002949 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002950
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002951 if (regs) {
2952 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2953 size, skip);
2954 } else {
2955 nr_entries = stack_trace_save(fstack->calls, size, skip);
2956 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002957
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002958 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002959 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2960 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002961 if (!event)
2962 goto out;
2963 entry = ring_buffer_event_data(event);
2964
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002965 memcpy(&entry->caller, fstack->calls, size);
2966 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002967
Tom Zanussif306cc82013-10-24 08:34:17 -05002968 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002969 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002970
2971 out:
2972 /* Again, don't let gcc optimize things here */
2973 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002974 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002975	preempt_enable_notrace();
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002977}
2978
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002979static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002980 struct trace_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002981 unsigned long flags,
2982 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002983{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002984 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002985 return;
2986
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002987 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002988}
2989
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002990void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2991 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002992{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002993 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002994
2995 if (rcu_is_watching()) {
2996 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2997 return;
2998 }
2999
3000 /*
3001 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3002 * but if the above rcu_is_watching() failed, then the NMI
3003 * triggered someplace critical, and rcu_irq_enter() should
3004 * not be called from NMI.
3005 */
3006 if (unlikely(in_nmi()))
3007 return;
3008
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003009 rcu_irq_enter_irqson();
3010 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3011 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003012}
3013
Steven Rostedt03889382009-12-11 09:48:22 -05003014/**
3015 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003016 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003017 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003018void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003019{
3020 unsigned long flags;
3021
3022 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003023 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003024
3025 local_save_flags(flags);
3026
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003027#ifndef CONFIG_UNWINDER_ORC
3028 /* Skip 1 to skip this function. */
3029 skip++;
3030#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003031 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003032 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003033}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003034EXPORT_SYMBOL_GPL(trace_dump_stack);
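/*
 * Illustrative sketch, not part of the kernel source: a typical
 * debugging call site for the helper above. The surrounding function
 * is a placeholder.
 */
#if 0	/* example only */
static void example_debug_hook(void)
{
	/* Record the caller's backtrace in the trace buffer */
	trace_dump_stack(0);
}
#endif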
Steven Rostedt03889382009-12-11 09:48:22 -05003035
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003036#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003037static DEFINE_PER_CPU(int, user_stack_count);
3038
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003039static void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003040ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003041{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003042 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003043 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003044 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003045
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003046 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003047 return;
3048
Steven Rostedtb6345872010-03-12 20:03:30 -05003049 /*
3050	 * NMIs cannot handle page faults, even with fixups.
3051	 * Saving the user stack can (and often does) fault.
3052 */
3053 if (unlikely(in_nmi()))
3054 return;
3055
Steven Rostedt91e86e52010-11-10 12:56:12 +01003056 /*
3057 * prevent recursion, since the user stack tracing may
3058 * trigger other kernel events.
3059 */
3060 preempt_disable();
3061 if (__this_cpu_read(user_stack_count))
3062 goto out;
3063
3064 __this_cpu_inc(user_stack_count);
3065
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003066 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3067 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02003068 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003069 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003070 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003071
Steven Rostedt48659d32009-09-11 11:36:23 -04003072 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003073 memset(&entry->caller, 0, sizeof(entry->caller));
3074
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003075 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003076 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003077 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003078
Li Zefan1dbd1952010-12-09 15:47:56 +08003079 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003080 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003081 out:
3082 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003083}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003084#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003085static void ftrace_trace_userstack(struct trace_buffer *buffer,
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003086 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02003087{
Török Edwin02b67512008-11-22 13:28:47 +02003088}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003089#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003090
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003091#endif /* CONFIG_STACKTRACE */
3092
Steven Rostedt07d777f2011-09-22 14:01:55 -04003093/* created for use with alloc_percpu */
3094struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003095 int nesting;
3096 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003097};
3098
3099static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003100
3101/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003102 * This allows for lockless recording. If we're nested too deeply, then
3103 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003104 */
3105static char *get_trace_buf(void)
3106{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003107 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003108
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003109 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003110 return NULL;
3111
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003112 buffer->nesting++;
3113
3114 /* Interrupts must see nesting incremented before we use the buffer */
3115 barrier();
3116 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003117}
3118
3119static void put_trace_buf(void)
3120{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003121 /* Don't let the decrement of nesting leak before this */
3122 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003123 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003124}
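/*
 * Illustrative sketch, not part of the kernel source: the pairing
 * discipline for the two helpers above, as followed by
 * trace_vbprintk() and __trace_array_vprintk() below. The formatting
 * step is a placeholder.
 */
#if 0	/* example only */
	char *tbuf = get_trace_buf();

	if (tbuf) {
		/* ... format at most TRACE_BUF_SIZE bytes into tbuf ... */
		put_trace_buf();	/* must pair in the same context */
	}
#endif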
3125
3126static int alloc_percpu_trace_buffer(void)
3127{
3128 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003129
3130 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003131 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003132 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003133
3134 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003135 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003136}
3137
Steven Rostedt81698832012-10-11 10:15:05 -04003138static int buffers_allocated;
3139
Steven Rostedt07d777f2011-09-22 14:01:55 -04003140void trace_printk_init_buffers(void)
3141{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003142 if (buffers_allocated)
3143 return;
3144
3145 if (alloc_percpu_trace_buffer())
3146 return;
3147
Steven Rostedt2184db42014-05-28 13:14:40 -04003148 /* trace_printk() is for debug use only. Don't use it in production. */
3149
Joe Perchesa395d6a2016-03-22 14:28:09 -07003150 pr_warn("\n");
3151 pr_warn("**********************************************************\n");
3152 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3153 pr_warn("** **\n");
3154 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3155 pr_warn("** **\n");
3156 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3157 pr_warn("** unsafe for production use. **\n");
3158 pr_warn("** **\n");
3159 pr_warn("** If you see this message and you are not debugging **\n");
3160 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3161 pr_warn("** **\n");
3162 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3163 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003164
Steven Rostedtb382ede62012-10-10 21:44:34 -04003165	/* Expand the buffers to their configured size */
3166 tracing_update_buffers();
3167
Steven Rostedt07d777f2011-09-22 14:01:55 -04003168 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003169
3170 /*
3171 * trace_printk_init_buffers() can be called by modules.
3172 * If that happens, then we need to start cmdline recording
3173	 * directly here. If the global_trace.array_buffer.buffer is already
3174 * allocated here, then this was called by module code.
3175 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003176 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003177 tracing_start_cmdline_record();
3178}
Divya Indif45d1222019-03-20 11:28:51 -07003179EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003180
3181void trace_printk_start_comm(void)
3182{
3183 /* Start tracing comms if trace printk is set */
3184 if (!buffers_allocated)
3185 return;
3186 tracing_start_cmdline_record();
3187}
3188
3189static void trace_printk_start_stop_comm(int enabled)
3190{
3191 if (!buffers_allocated)
3192 return;
3193
3194 if (enabled)
3195 tracing_start_cmdline_record();
3196 else
3197 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003198}
3199
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003200/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003201 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003202 * @ip: The address of the caller
3203 * @fmt: The string format to write to the buffer
3204 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003205 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003206int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003207{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003208 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003209 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003210 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003211 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003212 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003213 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003214 char *tbuffer;
3215 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003216
3217 if (unlikely(tracing_selftest_running || tracing_disabled))
3218 return 0;
3219
3220 /* Don't pollute graph traces with trace_vprintk internals */
3221 pause_graph_tracing();
3222
3223 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003224 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003225
Steven Rostedt07d777f2011-09-22 14:01:55 -04003226 tbuffer = get_trace_buf();
3227 if (!tbuffer) {
3228 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003229 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003230 }
3231
3232 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3233
3234 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003235 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003236
Steven Rostedt07d777f2011-09-22 14:01:55 -04003237 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003239 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003240 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003241 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3242 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003243 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003244 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003245 entry = ring_buffer_event_data(event);
3246 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003247 entry->fmt = fmt;
3248
Steven Rostedt07d777f2011-09-22 14:01:55 -04003249 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003250 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003251 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003252 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003253 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003254
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003255out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003256 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003257out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003258 put_trace_buf();
3259
3260out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003261 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003262 unpause_graph_tracing();
3263
3264 return len;
3265}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003266EXPORT_SYMBOL_GPL(trace_vbprintk);
3267
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003268__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003269static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003270__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003271 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003272{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003273 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003274 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003275 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003276 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003277 unsigned long flags;
3278 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003279
3280 if (tracing_disabled || tracing_selftest_running)
3281 return 0;
3282
Steven Rostedt07d777f2011-09-22 14:01:55 -04003283 /* Don't pollute graph traces with trace_vprintk internals */
3284 pause_graph_tracing();
3285
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003286 pc = preempt_count();
3287 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003288
3290 tbuffer = get_trace_buf();
3291 if (!tbuffer) {
3292 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003293 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003294 }
3295
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003296 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003297
Steven Rostedt07d777f2011-09-22 14:01:55 -04003298 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003299 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003300 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003301 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3302 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003303 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003304 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003305 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003306 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003307
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003308 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003309 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003310 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003311 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003312 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003313
3314out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003315 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003316 put_trace_buf();
3317
3318out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003319 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003320 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003321
3322 return len;
3323}
Steven Rostedt659372d2009-09-03 19:11:07 -04003324
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003325__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003326int trace_array_vprintk(struct trace_array *tr,
3327 unsigned long ip, const char *fmt, va_list args)
3328{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003329 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003330}
3331
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003332__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003333int trace_array_printk(struct trace_array *tr,
3334 unsigned long ip, const char *fmt, ...)
3335{
3336 int ret;
3337 va_list ap;
3338
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003339 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003340 return 0;
3341
Divya Indi953ae452019-08-14 10:55:25 -07003342 if (!tr)
3343 return -ENOENT;
3344
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003345 va_start(ap, fmt);
3346 ret = trace_array_vprintk(tr, ip, fmt, ap);
3347 va_end(ap);
3348 return ret;
3349}
Divya Indif45d1222019-03-20 11:28:51 -07003350EXPORT_SYMBOL_GPL(trace_array_printk);
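/*
 * Illustrative sketch, not part of the kernel source: writing into a
 * named instance buffer. trace_array_get_by_name() is the lookup
 * helper exported from this file; the instance name is made up, and
 * the reference should later be dropped with trace_array_put().
 */
#if 0	/* example only */
	struct trace_array *tr = trace_array_get_by_name("my_instance");

	if (tr)
		trace_array_printk(tr, _THIS_IP_, "answer=%d\n", 42);
#endif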
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003351
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003352__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003353int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003354 unsigned long ip, const char *fmt, ...)
3355{
3356 int ret;
3357 va_list ap;
3358
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003359 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003360 return 0;
3361
3362 va_start(ap, fmt);
3363 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3364 va_end(ap);
3365 return ret;
3366}
3367
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003368__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003369int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3370{
Steven Rostedta813a152009-10-09 01:41:35 -04003371 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003372}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003373EXPORT_SYMBOL_GPL(trace_vprintk);
3374
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003375static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003376{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003377 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3378
Steven Rostedt5a90f572008-09-03 17:42:51 -04003379 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003380 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003381 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003382}
3383
Ingo Molnare309b412008-05-12 21:20:51 +02003384static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003385peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3386 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003387{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003388 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003389 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003390
Steven Rostedtd7690412008-10-01 00:29:53 -04003391 if (buf_iter)
3392 event = ring_buffer_iter_peek(buf_iter, ts);
3393 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003394 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003395 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003396
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003397 if (event) {
3398 iter->ent_size = ring_buffer_event_length(event);
3399 return ring_buffer_event_data(event);
3400 }
3401 iter->ent_size = 0;
3402 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003403}
Steven Rostedtd7690412008-10-01 00:29:53 -04003404
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003405static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003406__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3407 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003408{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003409 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003410 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003411 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003412 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003413 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003414 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003415 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003416 int cpu;
3417
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003418 /*
3419	 * If we are in a per_cpu trace file, don't bother iterating over
3420	 * all CPUs; peek directly.
3421 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003422 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003423 if (ring_buffer_empty_cpu(buffer, cpu_file))
3424 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003425 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003426 if (ent_cpu)
3427 *ent_cpu = cpu_file;
3428
3429 return ent;
3430 }
3431
Steven Rostedtab464282008-05-12 21:21:00 +02003432 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003433
3434 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003436
Steven Rostedtbc21b472010-03-31 19:49:26 -04003437 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003438
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003439 /*
3440 * Pick the entry with the smallest timestamp:
3441 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003442 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443 next = ent;
3444 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003445 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003446 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003447 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003448 }
3449 }
3450
Steven Rostedt12b5da32012-03-27 10:43:28 -04003451 iter->ent_size = next_size;
3452
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453 if (ent_cpu)
3454 *ent_cpu = next_cpu;
3455
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003456 if (ent_ts)
3457 *ent_ts = next_ts;
3458
Steven Rostedtbc21b472010-03-31 19:49:26 -04003459 if (missing_events)
3460 *missing_events = next_lost;
3461
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462 return next;
3463}
3464
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003465/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003466struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3467 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003468{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003469 /* __find_next_entry will reset ent_size */
3470 int ent_size = iter->ent_size;
3471 struct trace_entry *entry;
3472
3473 /*
3474	 * __find_next_entry() may call peek_next_entry(), which may call
3475	 * ring_buffer_peek(), leaving the contents of iter->ent undefined.
3476	 * Copy iter->ent now.
3477 */
3478 if (iter->ent && iter->ent != iter->temp) {
3479 if (!iter->temp || iter->temp_size < iter->ent_size) {
3480 kfree(iter->temp);
3481 iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
3482 if (!iter->temp)
3483 return NULL;
3484 }
3485 memcpy(iter->temp, iter->ent, iter->ent_size);
3486 iter->temp_size = iter->ent_size;
3487 iter->ent = iter->temp;
3488 }
3489 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3490 /* Put back the original ent_size */
3491 iter->ent_size = ent_size;
3492
3493 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003494}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003495
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003496/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003497void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003498{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003499 iter->ent = __find_next_entry(iter, &iter->cpu,
3500 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003501
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003502 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003503 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003504
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003505 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003506}
3507
Ingo Molnare309b412008-05-12 21:20:51 +02003508static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003509{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003510 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003511 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003512}
3513
Ingo Molnare309b412008-05-12 21:20:51 +02003514static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003515{
3516 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003517 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003518 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003519
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003520 WARN_ON_ONCE(iter->leftover);
3521
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003522 (*pos)++;
3523
3524 /* can't go backwards */
3525 if (iter->idx > i)
3526 return NULL;
3527
3528 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003529 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003530 else
3531 ent = iter;
3532
3533 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003534 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003535
3536 iter->pos = *pos;
3537
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538 return ent;
3539}
3540
Jason Wessel955b61e2010-08-05 09:22:23 -05003541void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003542{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003543 struct ring_buffer_event *event;
3544 struct ring_buffer_iter *buf_iter;
3545 unsigned long entries = 0;
3546 u64 ts;
3547
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003548 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003549
Steven Rostedt6d158a82012-06-27 20:46:14 -04003550 buf_iter = trace_buffer_iter(iter, cpu);
3551 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003552 return;
3553
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003554 ring_buffer_iter_reset(buf_iter);
3555
3556 /*
3557	 * With the max latency tracers, a reset may never have taken
3558	 * place on a CPU. This is evident from the timestamp being
3559	 * before the start of the buffer.
3560 */
3561 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003562 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003563 break;
3564 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003565 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003566 }
3567
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003568 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003569}
3570
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003571/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003572 * The current tracer is copied to avoid global locking
3573 * all around.
3574 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003575static void *s_start(struct seq_file *m, loff_t *pos)
3576{
3577 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003578 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003579 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003580 void *p = NULL;
3581 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003582 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003583
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003584 /*
3585	 * Copy the tracer to avoid using a global lock all around.
3586	 * iter->trace is a copy of current_trace; the name pointer can
3587	 * be compared instead of using strcmp(), as iter->trace->name
3588	 * will point to the same string as current_trace->name.
3589 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003590 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003591 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3592 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003593 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003594
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003595#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003596 if (iter->snapshot && iter->trace->use_max_tr)
3597 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003598#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003599
3600 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003601 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003602
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003603 if (*pos != iter->pos) {
3604 iter->ent = NULL;
3605 iter->cpu = 0;
3606 iter->idx = -1;
3607
Steven Rostedtae3b5092013-01-23 15:22:59 -05003608 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003609 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003610 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003611 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003612 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003613
Lai Jiangshanac91d852010-03-02 17:54:50 +08003614 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003615 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3616 ;
3617
3618 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003619 /*
3620 * If we overflowed the seq_file before, then we want
3621 * to just reuse the trace_seq buffer again.
3622 */
3623 if (iter->leftover)
3624 p = iter;
3625 else {
3626 l = *pos - 1;
3627 p = s_next(m, p, &l);
3628 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003629 }
3630
Lai Jiangshan4f535962009-05-18 19:35:34 +08003631 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003632 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003633 return p;
3634}
3635
3636static void s_stop(struct seq_file *m, void *p)
3637{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003638 struct trace_iterator *iter = m->private;
3639
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003640#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003641 if (iter->snapshot && iter->trace->use_max_tr)
3642 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003643#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003644
3645 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003646 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003647
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003648 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003649 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003650}
3651
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003652static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003653get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003654 unsigned long *entries, int cpu)
3655{
3656 unsigned long count;
3657
3658 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3659 /*
3660 * If this buffer has skipped entries, then we hold all
3661 * entries for the trace and we need to ignore the
3662 * ones before the time stamp.
3663 */
3664 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3665 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3666 /* total is the same as the entries */
3667 *total = count;
3668 } else
3669 *total = count +
3670 ring_buffer_overrun_cpu(buf->buffer, cpu);
3671 *entries = count;
3672}
3673
3674static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003675get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003676 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003677{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003678 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003679 int cpu;
3680
3681 *total = 0;
3682 *entries = 0;
3683
3684 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003685 get_total_entries_cpu(buf, &t, &e, cpu);
3686 *total += t;
3687 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003688 }
3689}
3690
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003691unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3692{
3693 unsigned long total, entries;
3694
3695 if (!tr)
3696 tr = &global_trace;
3697
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003698 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003699
3700 return entries;
3701}
3702
3703unsigned long trace_total_entries(struct trace_array *tr)
3704{
3705 unsigned long total, entries;
3706
3707 if (!tr)
3708 tr = &global_trace;
3709
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003710 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003711
3712 return entries;
3713}
3714
Ingo Molnare309b412008-05-12 21:20:51 +02003715static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003716{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003717 seq_puts(m, "# _------=> CPU# \n"
3718 "# / _-----=> irqs-off \n"
3719 "# | / _----=> need-resched \n"
3720 "# || / _---=> hardirq/softirq \n"
3721 "# ||| / _--=> preempt-depth \n"
3722 "# |||| / delay \n"
3723 "# cmd pid ||||| time | caller \n"
3724 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003725}
3726
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003727static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003728{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003729 unsigned long total;
3730 unsigned long entries;
3731
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003732 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003733 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3734 entries, total, num_online_cpus());
3735 seq_puts(m, "#\n");
3736}
3737
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003738static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003739 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003740{
Joel Fernandes441dae82017-06-25 22:38:43 -07003741 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3742
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003743 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003744
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003745 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3746 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003747}
3748
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003749static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07003750 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003751{
Joel Fernandes441dae82017-06-25 22:38:43 -07003752 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003753 const char *space = " ";
3754 int prec = tgid ? 10 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003755
Quentin Perret9e738212019-02-14 15:29:50 +00003756 print_event_info(buf, m);
3757
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003758 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3759 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3760 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3761 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3762 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3763 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3764 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003765}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003766
Jiri Olsa62b915f2010-04-02 19:01:22 +02003767void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003768print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3769{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003770 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003771 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003772 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003773 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003774 unsigned long entries;
3775 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003776 const char *name = "preemption";
3777
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003778 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003779
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003780 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003781
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003782 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003783 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003784 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003785 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003786 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003787 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003788 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003789 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003790 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003791 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003792#if defined(CONFIG_PREEMPT_NONE)
3793 "server",
3794#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3795 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003796#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003797 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02003798#elif defined(CONFIG_PREEMPT_RT)
3799 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003800#else
3801 "unknown",
3802#endif
3803 /* These are reserved for later use */
3804 0, 0, 0, 0);
3805#ifdef CONFIG_SMP
3806 seq_printf(m, " #P:%d)\n", num_online_cpus());
3807#else
3808 seq_puts(m, ")\n");
3809#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003810 seq_puts(m, "# -----------------\n");
3811 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003812 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003813 data->comm, data->pid,
3814 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003815 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003816 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003817
3818 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003819 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003820 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3821 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003822 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003823 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3824 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003825 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003826 }
3827
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003828 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003829}
3830
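/*
 * print_trace_header() produces the classic latency-trace banner.
 * An illustrative (not verbatim) example of its output:
 *
 *	# irqsoff latency trace v1.1.5 on 5.6.0
 *	# -----------------------------------------------------------
 *	# latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *	#    -----------------
 *	#    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 *	#    -----------------
 */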
Steven Rostedta3097202008-11-07 22:36:02 -05003831static void test_cpu_buff_start(struct trace_iterator *iter)
3832{
3833 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003834 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003835
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003836 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003837 return;
3838
3839 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3840 return;
3841
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003842 if (cpumask_available(iter->started) &&
3843 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003844 return;
3845
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003846 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003847 return;
3848
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003849 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003850 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003851
3852	/* Don't print the "buffer started" annotation for the first entry */
3853 if (iter->idx > 1)
3854 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3855 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003856}
3857
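/*
 * test_cpu_buff_start() above is what emits the
 * "##### CPU %u buffer started ####" marker: it bails out unless the
 * "annotate" trace option and the iterator's ANNOTATE flag are both
 * set, the CPU has not already been marked in iter->started, and the
 * CPU's buffer has no skipped entries; the marker is also suppressed
 * for the very first entry of the trace.
 */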
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003858static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003859{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003860 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003861 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003862 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003863 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003864 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003865
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003866 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003867
Steven Rostedta3097202008-11-07 22:36:02 -05003868 test_cpu_buff_start(iter);
3869
Steven Rostedtf633cef2008-12-23 23:24:13 -05003870 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003871
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003872 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003873 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3874 trace_print_lat_context(iter);
3875 else
3876 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003877 }
3878
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003879 if (trace_seq_has_overflowed(s))
3880 return TRACE_TYPE_PARTIAL_LINE;
3881
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003882 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003883 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003884
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003885 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003886
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003887 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003888}
3889
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003890static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003891{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003892 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003893 struct trace_seq *s = &iter->seq;
3894 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003895 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003896
3897 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003898
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003899 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003900 trace_seq_printf(s, "%d %d %llu ",
3901 entry->pid, iter->cpu, iter->ts);
3902
3903 if (trace_seq_has_overflowed(s))
3904 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003905
Steven Rostedtf633cef2008-12-23 23:24:13 -05003906 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003907 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003908 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003909
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003910 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003911
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003912 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003913}
3914
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003915static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003916{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003917 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003918 struct trace_seq *s = &iter->seq;
3919 unsigned char newline = '\n';
3920 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003921 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003922
3923 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003924
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003925 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003926 SEQ_PUT_HEX_FIELD(s, entry->pid);
3927 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3928 SEQ_PUT_HEX_FIELD(s, iter->ts);
3929 if (trace_seq_has_overflowed(s))
3930 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003931 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003932
Steven Rostedtf633cef2008-12-23 23:24:13 -05003933 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003934 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003935 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003936 if (ret != TRACE_TYPE_HANDLED)
3937 return ret;
3938 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003939
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003940 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003941
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003942 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003943}
3944
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003945static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003946{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003947 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003948 struct trace_seq *s = &iter->seq;
3949 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003950 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003951
3952 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003953
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003954 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003955 SEQ_PUT_FIELD(s, entry->pid);
3956 SEQ_PUT_FIELD(s, iter->cpu);
3957 SEQ_PUT_FIELD(s, iter->ts);
3958 if (trace_seq_has_overflowed(s))
3959 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003960 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003961
Steven Rostedtf633cef2008-12-23 23:24:13 -05003962 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003963 return event ? event->funcs->binary(iter, 0, event) :
3964 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003965}
3966
Jiri Olsa62b915f2010-04-02 19:01:22 +02003967int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003968{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003969 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003970 int cpu;
3971
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003972 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003973 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003974 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003975 buf_iter = trace_buffer_iter(iter, cpu);
3976 if (buf_iter) {
3977 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003978 return 0;
3979 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003980 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003981 return 0;
3982 }
3983 return 1;
3984 }
3985
Steven Rostedtab464282008-05-12 21:21:00 +02003986 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003987 buf_iter = trace_buffer_iter(iter, cpu);
3988 if (buf_iter) {
3989 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003990 return 0;
3991 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003992 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003993 return 0;
3994 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003995 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003996
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003997 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003998}
3999
Lai Jiangshan4f535962009-05-18 19:35:34 +08004000/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004001enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004002{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004003 struct trace_array *tr = iter->tr;
4004 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004005 enum print_line_t ret;
4006
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004007 if (iter->lost_events) {
4008 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4009 iter->cpu, iter->lost_events);
4010 if (trace_seq_has_overflowed(&iter->seq))
4011 return TRACE_TYPE_PARTIAL_LINE;
4012 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004013
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004014 if (iter->trace && iter->trace->print_line) {
4015 ret = iter->trace->print_line(iter);
4016 if (ret != TRACE_TYPE_UNHANDLED)
4017 return ret;
4018 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004019
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004020 if (iter->ent->type == TRACE_BPUTS &&
4021 trace_flags & TRACE_ITER_PRINTK &&
4022 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4023 return trace_print_bputs_msg_only(iter);
4024
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004025 if (iter->ent->type == TRACE_BPRINT &&
4026 trace_flags & TRACE_ITER_PRINTK &&
4027 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004028 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004029
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004030 if (iter->ent->type == TRACE_PRINT &&
4031 trace_flags & TRACE_ITER_PRINTK &&
4032 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004033 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004034
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004035 if (trace_flags & TRACE_ITER_BIN)
4036 return print_bin_fmt(iter);
4037
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004038 if (trace_flags & TRACE_ITER_HEX)
4039 return print_hex_fmt(iter);
4040
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004041 if (trace_flags & TRACE_ITER_RAW)
4042 return print_raw_fmt(iter);
4043
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004044 return print_trace_fmt(iter);
4045}
4046
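/*
 * Dispatch order in print_trace_line() above: lost-event warnings
 * come first, then a tracer-specific print_line() callback gets
 * first refusal, then the printk-msg-only special cases (bputs,
 * bprint, print), then the mutually exclusive bin/hex/raw output
 * modes, and finally the default human-readable format. Exactly one
 * formatter handles each entry.
 */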
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004047void trace_latency_header(struct seq_file *m)
4048{
4049 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004050 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004051
4052 /* print nothing if the buffers are empty */
4053 if (trace_empty(iter))
4054 return;
4055
4056 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4057 print_trace_header(m, iter);
4058
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004059 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004060 print_lat_help_header(m);
4061}
4062
Jiri Olsa62b915f2010-04-02 19:01:22 +02004063void trace_default_header(struct seq_file *m)
4064{
4065 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004066 struct trace_array *tr = iter->tr;
4067 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004068
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004069 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4070 return;
4071
Jiri Olsa62b915f2010-04-02 19:01:22 +02004072 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4073 /* print nothing if the buffers are empty */
4074 if (trace_empty(iter))
4075 return;
4076 print_trace_header(m, iter);
4077 if (!(trace_flags & TRACE_ITER_VERBOSE))
4078 print_lat_help_header(m);
4079 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004080 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4081 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004082 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004083 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004084 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004085 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004086 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004087 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004088 }
4089}
4090
Steven Rostedte0a413f2011-09-29 21:26:16 -04004091static void test_ftrace_alive(struct seq_file *m)
4092{
4093 if (!ftrace_is_dead())
4094 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004095 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4096 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004097}
4098
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004099#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004100static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004101{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004102 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4103 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4104 "# Takes a snapshot of the main buffer.\n"
4105 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4106 "# (Doesn't have to be '2' works with any number that\n"
4107 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004108}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004109
4110static void show_snapshot_percpu_help(struct seq_file *m)
4111{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004112 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004113#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004114 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4115 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004116#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004117 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4118 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004119#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004120 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4121 "# (Doesn't have to be '2' works with any number that\n"
4122 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004123}
4124
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004125static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4126{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004127 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004128 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004129 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004130 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004131
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004132 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004133 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4134 show_snapshot_main_help(m);
4135 else
4136 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004137}
4138#else
4139/* Should never be called */
4140static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4141#endif
4142
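/*
 * Typical snapshot flow matching the help text above (illustrative,
 * assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot    allocate + snapshot
 *	# cat /sys/kernel/tracing/snapshot         read the frozen copy
 *	# echo 0 > /sys/kernel/tracing/snapshot    clear and free
 */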
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004143static int s_show(struct seq_file *m, void *v)
4144{
4145 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004146 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004147
4148 if (iter->ent == NULL) {
4149 if (iter->tr) {
4150 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4151 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004152 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004153 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004154 if (iter->snapshot && trace_empty(iter))
4155 print_snapshot_help(m, iter);
4156 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004157 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004158 else
4159 trace_default_header(m);
4160
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004161 } else if (iter->leftover) {
4162 /*
4163 * If we filled the seq_file buffer earlier, we
4164 * want to just show it now.
4165 */
4166 ret = trace_print_seq(m, &iter->seq);
4167
4168 /* ret should this time be zero, but you never know */
4169 iter->leftover = ret;
4170
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004171 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004172 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004173 ret = trace_print_seq(m, &iter->seq);
4174 /*
4175 * If we overflow the seq_file buffer, then it will
4176 * ask us for this data again at start up.
4177 * Use that instead.
4178 * ret is 0 if seq_file write succeeded.
4179 * -1 otherwise.
4180 */
4181 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004182 }
4183
4184 return 0;
4185}
4186
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004187/*
4188 * Should be used after trace_array_get(); trace_types_lock
4189 * ensures that i_cdev was already initialized.
4190 */
4191static inline int tracing_get_cpu(struct inode *inode)
4192{
4193 if (inode->i_cdev) /* See trace_create_cpu_file() */
4194 return (long)inode->i_cdev - 1;
4195 return RING_BUFFER_ALL_CPUS;
4196}
4197
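/*
 * Sketch of the encoding assumed by tracing_get_cpu(): the per-cpu
 * file creation side (trace_create_cpu_file(), per the comment above)
 * is expected to store cpu + 1 in i_cdev, so a NULL i_cdev can still
 * mean "all CPUs" while cpu 0 gets a non-NULL cookie:
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);	// at creation
 *	cpu = (long)inode->i_cdev - 1;			// here, at use
 */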
James Morris88e9d342009-09-22 16:43:43 -07004198static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004199 .start = s_start,
4200 .next = s_next,
4201 .stop = s_stop,
4202 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004203};
4204
Ingo Molnare309b412008-05-12 21:20:51 +02004205static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004206__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004207{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004208 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004209 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004210 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004211
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004212 if (tracing_disabled)
4213 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004214
Jiri Olsa50e18b92012-04-25 10:23:39 +02004215 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004216 if (!iter)
4217 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004218
Gil Fruchter72917232015-06-09 10:32:35 +03004219 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004220 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004221 if (!iter->buffer_iter)
4222 goto release;
4223
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004224 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004225 * trace_find_next_entry() may need to save off iter->ent.
4226 * It will place it into the iter->temp buffer. As most
4227 * events are less than 128, allocate a buffer of that size.
4228 * If one is greater, then trace_find_next_entry() will
4229 * allocate a new buffer to adjust for the bigger iter->ent.
4230 * It's not critical if it fails to get allocated here.
4231 */
4232 iter->temp = kmalloc(128, GFP_KERNEL);
4233 if (iter->temp)
4234 iter->temp_size = 128;
4235
4236 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004237 * We make a copy of the current tracer to avoid concurrent
4238 * changes on it while we are reading.
4239 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004240 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004241 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004242 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004243 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004244
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004245 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004246
Li Zefan79f55992009-06-15 14:58:26 +08004247 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004248 goto fail;
4249
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004250 iter->tr = tr;
4251
4252#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004253 /* Currently only the top directory has a snapshot */
4254 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004255 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004256 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004257#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004258 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004259 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004260 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004261 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004262 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004263
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004264 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004265 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004266 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004267
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004268 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004269 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004270 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4271
David Sharp8be07092012-11-13 12:18:22 -08004272 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004273 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004274 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4275
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004276 /* stop the trace while dumping if we are not opening "snapshot" */
4277 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004278 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004279
Steven Rostedtae3b5092013-01-23 15:22:59 -05004280 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004281 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004282 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004283 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004284 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004285 }
4286 ring_buffer_read_prepare_sync();
4287 for_each_tracing_cpu(cpu) {
4288 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004289 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004290 }
4291 } else {
4292 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004293 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004294 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004295 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004296 ring_buffer_read_prepare_sync();
4297 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004298 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004299 }
4300
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004301 mutex_unlock(&trace_types_lock);
4302
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004303 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004304
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004305 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004306 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004307 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004308 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004309 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004310release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004311 seq_release_private(inode, file);
4312 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004313}
4314
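/*
 * Notes on __tracing_open() above: the current tracer is copied so a
 * concurrent tracer switch cannot change it mid-read; unless the file
 * is the snapshot one, tracing is stopped for the duration of the
 * dump (and restarted in tracing_release()); and ring buffer
 * iterators are prepared for either one CPU or all of them before
 * being synced and started.
 */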
4315int tracing_open_generic(struct inode *inode, struct file *filp)
4316{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004317 int ret;
4318
4319 ret = tracing_check_open_get_tr(NULL);
4320 if (ret)
4321 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004322
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004323 filp->private_data = inode->i_private;
4324 return 0;
4325}
4326
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004327bool tracing_is_disabled(void)
4328{
4329	return tracing_disabled;
4330}
4331
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004332/*
4333 * Open and update trace_array ref count.
4334 * Must have the current trace_array passed to it.
4335 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004336int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004337{
4338 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004339 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004340
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004341 ret = tracing_check_open_get_tr(tr);
4342 if (ret)
4343 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004344
4345 filp->private_data = inode->i_private;
4346
4347 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004348}
4349
Hannes Eder4fd27352009-02-10 19:44:12 +01004350static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004351{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004352 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004353 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004354 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004355 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004356
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004357 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004358 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004359 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004360 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004361
Oleg Nesterov6484c712013-07-23 17:26:10 +02004362 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004363 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004364 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004365
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004366 for_each_tracing_cpu(cpu) {
4367 if (iter->buffer_iter[cpu])
4368 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4369 }
4370
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004371 if (iter->trace && iter->trace->close)
4372 iter->trace->close(iter);
4373
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004374 if (!iter->snapshot)
4375		/* re-enable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004376 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004377
4378 __trace_array_put(tr);
4379
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004380 mutex_unlock(&trace_types_lock);
4381
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004382 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004383 free_cpumask_var(iter->started);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004384 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004385 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004386 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004387 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004388
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004389 return 0;
4390}
4391
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004392static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4393{
4394 struct trace_array *tr = inode->i_private;
4395
4396 trace_array_put(tr);
4397 return 0;
4398}
4399
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004400static int tracing_single_release_tr(struct inode *inode, struct file *file)
4401{
4402 struct trace_array *tr = inode->i_private;
4403
4404 trace_array_put(tr);
4405
4406 return single_release(inode, file);
4407}
4408
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004409static int tracing_open(struct inode *inode, struct file *file)
4410{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004411 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004412 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004413 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004414
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004415 ret = tracing_check_open_get_tr(tr);
4416 if (ret)
4417 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004418
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004419	/* If this file was opened for write, then erase its contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004420 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4421 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004422 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004423
4424#ifdef CONFIG_TRACER_MAX_TRACE
4425 if (tr->current_trace->print_max)
4426 trace_buf = &tr->max_buffer;
4427#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004428
4429 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004430 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004431 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004432 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004433 }
4434
4435 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004436 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004437 if (IS_ERR(iter))
4438 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004439 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004440 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4441 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004442
4443 if (ret < 0)
4444 trace_array_put(tr);
4445
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004446 return ret;
4447}
4448
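/*
 * Side effect worth noting in tracing_open() above: opening "trace"
 * for write with O_TRUNC (what a shell's "echo > trace" does) resets
 * the buffer contents before any iterator is set up -- one CPU for a
 * per_cpu file, all CPUs otherwise, and the max_buffer instead when
 * the current tracer keeps a max trace.
 */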
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004449/*
4450 * Some tracers are not suitable for instance buffers.
4451 * A tracer is always available for the global array (toplevel)
4452 * or if it explicitly states that it is.
4453 */
4454static bool
4455trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4456{
4457 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4458}
4459
4460/* Find the next tracer that this trace array may use */
4461static struct tracer *
4462get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4463{
4464 while (t && !trace_ok_for_array(t, tr))
4465 t = t->next;
4466
4467 return t;
4468}
4469
Ingo Molnare309b412008-05-12 21:20:51 +02004470static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004471t_next(struct seq_file *m, void *v, loff_t *pos)
4472{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004473 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004474 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004475
4476 (*pos)++;
4477
4478 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004479 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004480
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004481 return t;
4482}
4483
4484static void *t_start(struct seq_file *m, loff_t *pos)
4485{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004486 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004487 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004488 loff_t l = 0;
4489
4490 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004491
4492 t = get_tracer_for_array(tr, trace_types);
4493 for (; t && l < *pos; t = t_next(m, t, &l))
4494 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004495
4496 return t;
4497}
4498
4499static void t_stop(struct seq_file *m, void *p)
4500{
4501 mutex_unlock(&trace_types_lock);
4502}
4503
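/*
 * Note on the seq_file iteration above: t_start() rewalks the tracer
 * list from the head on every call instead of caching a position.
 * With the small number of registered tracers this O(n) restart is
 * cheap, and it stays correct across partial reads of the
 * "available_tracers" file.
 */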
4504static int t_show(struct seq_file *m, void *v)
4505{
4506 struct tracer *t = v;
4507
4508 if (!t)
4509 return 0;
4510
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004511 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004512 if (t->next)
4513 seq_putc(m, ' ');
4514 else
4515 seq_putc(m, '\n');
4516
4517 return 0;
4518}
4519
James Morris88e9d342009-09-22 16:43:43 -07004520static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004521 .start = t_start,
4522 .next = t_next,
4523 .stop = t_stop,
4524 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004525};
4526
4527static int show_traces_open(struct inode *inode, struct file *file)
4528{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004529 struct trace_array *tr = inode->i_private;
4530 struct seq_file *m;
4531 int ret;
4532
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004533 ret = tracing_check_open_get_tr(tr);
4534 if (ret)
4535 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004536
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004537 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004538 if (ret) {
4539 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004540 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004541 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004542
4543 m = file->private_data;
4544 m->private = tr;
4545
4546 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004547}
4548
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004549static int show_traces_release(struct inode *inode, struct file *file)
4550{
4551 struct trace_array *tr = inode->i_private;
4552
4553 trace_array_put(tr);
4554 return seq_release(inode, file);
4555}
4556
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004557static ssize_t
4558tracing_write_stub(struct file *filp, const char __user *ubuf,
4559 size_t count, loff_t *ppos)
4560{
4561 return count;
4562}
4563
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004564loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004565{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004566 int ret;
4567
Slava Pestov364829b2010-11-24 15:13:16 -08004568 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004569 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004570 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004571 file->f_pos = ret = 0;
4572
4573 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004574}
4575
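/*
 * tracing_lseek(): only read opens have a seq_file behind them, so a
 * seek on a write-only open simply pins f_pos at 0 rather than
 * calling seq_lseek() on a seq_file that was never allocated.
 */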
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004576static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004577 .open = tracing_open,
4578 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004579 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004580 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004581 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004582};
4583
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004584static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004585 .open = show_traces_open,
4586 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004587 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004588 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004589};
4590
4591static ssize_t
4592tracing_cpumask_read(struct file *filp, char __user *ubuf,
4593 size_t count, loff_t *ppos)
4594{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004595 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004596 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004597 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004598
Changbin Du90e406f2017-11-30 11:39:43 +08004599 len = snprintf(NULL, 0, "%*pb\n",
4600 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4601 mask_str = kmalloc(len, GFP_KERNEL);
4602 if (!mask_str)
4603 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004604
Changbin Du90e406f2017-11-30 11:39:43 +08004605 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004606 cpumask_pr_args(tr->tracing_cpumask));
4607 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004608 count = -EINVAL;
4609 goto out_err;
4610 }
Changbin Du90e406f2017-11-30 11:39:43 +08004611 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004612
4613out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004614 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004615
4616 return count;
4617}
4618
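/*
 * The snprintf(NULL, 0, ...) in tracing_cpumask_read() above is the
 * usual size-probe idiom: with no buffer it writes nothing but still
 * returns the length the formatted cpumask would need, so the
 * kmalloc() is sized exactly (the +1 covers the terminating NUL).
 */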
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004619int tracing_set_cpumask(struct trace_array *tr,
4620 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02004621{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004622 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304623
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004624 if (!tr)
4625 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004626
Steven Rostedta5e25882008-12-02 15:34:05 -05004627 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004628 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004629 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004630 /*
4631 * Increase/decrease the disabled counter if we are
4632 * about to flip a bit in the cpumask:
4633 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004634 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304635 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004636 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4637 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004638 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004639 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304640 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004641 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4642 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004643 }
4644 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004645 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004646 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004647
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004648 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004649
4650 return 0;
4651}
4652
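/*
 * Illustrative use of tracing_set_cpumask() via the tracing_cpumask
 * file (again assuming tracefs at /sys/kernel/tracing): restrict
 * tracing to CPUs 0 and 1 with
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * Every bit cleared bumps that CPU's "disabled" counter and stops
 * its ring buffer recording; every bit set does the reverse.
 */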
4653static ssize_t
4654tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4655 size_t count, loff_t *ppos)
4656{
4657 struct trace_array *tr = file_inode(filp)->i_private;
4658 cpumask_var_t tracing_cpumask_new;
4659 int err;
4660
4661 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4662 return -ENOMEM;
4663
4664 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4665 if (err)
4666 goto err_free;
4667
4668 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4669 if (err)
4670 goto err_free;
4671
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304672 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004673
Ingo Molnarc7078de2008-05-12 21:20:52 +02004674 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004675
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004676err_free:
Li Zefan215368e2009-06-15 10:56:42 +08004677 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004678
4679 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004680}
4681
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004682static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004683 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004684 .read = tracing_cpumask_read,
4685 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004686 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004687 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004688};
4689
Li Zefanfdb372e2009-12-08 11:15:59 +08004690static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004691{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004692 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004693 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004694 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004695 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004696
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004697 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004698 tracer_flags = tr->current_trace->flags->val;
4699 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004700
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004701 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004702 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004703 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004704 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004705 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004706 }
4707
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004708 for (i = 0; trace_opts[i].name; i++) {
4709 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004710 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004711 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004712 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004713 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004714 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004715
Li Zefanfdb372e2009-12-08 11:15:59 +08004716 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004717}
4718
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004719static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004720 struct tracer_flags *tracer_flags,
4721 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004722{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004723 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004724 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004725
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004726 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004727 if (ret)
4728 return ret;
4729
4730 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004731 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004732 else
Zhaolei77708412009-08-07 18:53:21 +08004733 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004734 return 0;
4735}
4736
Li Zefan8d18eaa2009-12-08 11:17:06 +08004737/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004738static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004739{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004740 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004741 struct tracer_flags *tracer_flags = trace->flags;
4742 struct tracer_opt *opts = NULL;
4743 int i;
4744
4745 for (i = 0; tracer_flags->opts[i].name; i++) {
4746 opts = &tracer_flags->opts[i];
4747
4748 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004749 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004750 }
4751
4752 return -EINVAL;
4753}
4754
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004755/* Some tracers require overwrite to stay enabled */
4756int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4757{
4758 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4759 return -1;
4760
4761 return 0;
4762}
4763
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004764int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004765{
Prateek Sood3a53acf2019-12-10 09:15:16 +00004766 if ((mask == TRACE_ITER_RECORD_TGID) ||
4767 (mask == TRACE_ITER_RECORD_CMD))
4768 lockdep_assert_held(&event_mutex);
4769
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004770 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004771 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004772 return 0;
4773
4774 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004775 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004776 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004777 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004778
4779 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004780 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004781 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004782 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004783
4784 if (mask == TRACE_ITER_RECORD_CMD)
4785 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004786
Joel Fernandesd914ba32017-06-26 19:01:55 -07004787 if (mask == TRACE_ITER_RECORD_TGID) {
4788 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08004789 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07004790 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004791 GFP_KERNEL);
4792 if (!tgid_map) {
4793 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4794 return -ENOMEM;
4795 }
4796
4797 trace_event_enable_tgid_record(enabled);
4798 }
4799
Steven Rostedtc37775d2016-04-13 16:59:18 -04004800 if (mask == TRACE_ITER_EVENT_FORK)
4801 trace_event_follow_fork(tr, enabled);
4802
Namhyung Kim1e104862017-04-17 11:44:28 +09004803 if (mask == TRACE_ITER_FUNC_FORK)
4804 ftrace_pid_follow_fork(tr, enabled);
4805
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004806 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004807 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004808#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004809 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004810#endif
4811 }
Steven Rostedt81698832012-10-11 10:15:05 -04004812
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004813 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004814 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004815 trace_printk_control(enabled);
4816 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004817
4818 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004819}
4820
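/*
 * Summary of the per-flag side effects above: RECORD_CMD and
 * RECORD_TGID toggle comm/tgid recording (the tgid map is allocated
 * lazily on first enable), EVENT_FORK and FUNC_FORK control fork
 * following, OVERWRITE is pushed down into the ring buffer(s), and
 * PRINTK gates trace_printk() output.
 */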
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09004821int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004822{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004823 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004824 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004825 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004826 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004827 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004828
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004829 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004830
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004831 len = str_has_prefix(cmp, "no");
4832 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004833 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004834
4835 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004836
Prateek Sood3a53acf2019-12-10 09:15:16 +00004837 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004838 mutex_lock(&trace_types_lock);
4839
Yisheng Xie591a0332018-05-17 16:36:03 +08004840 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004841 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004842 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004843 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004844 else
4845 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004846
4847 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00004848 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004849
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004850 /*
4851 * If the first trailing whitespace is replaced with '\0' by strstrip,
4852 * turn it back into a space.
4853 */
4854 if (orig_len > strlen(option))
4855 option[strlen(option)] = ' ';
4856
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004857 return ret;
4858}
4859
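/*
 * Boot-time variant of trace_set_options(): trace_boot_options_buf
 * holds the comma-separated value of the "trace_options=" kernel
 * command line parameter, e.g. (illustrative):
 *
 *   trace_options=sym-offset,notrace_printk
 *
 * Each token is applied to global_trace exactly as if it had been
 * written to the trace_options file.
 */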
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004860static void __init apply_trace_boot_options(void)
4861{
4862 char *buf = trace_boot_options_buf;
4863 char *option;
4864
4865 while (true) {
4866 option = strsep(&buf, ",");
4867
4868 if (!option)
4869 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004870
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004871 if (*option)
4872 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004873
4874 /* Put back the comma to allow this to be called again */
4875 if (buf)
4876 *(buf - 1) = ',';
4877 }
4878}
4879
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004880static ssize_t
4881tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4882 size_t cnt, loff_t *ppos)
4883{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004884 struct seq_file *m = filp->private_data;
4885 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004886 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004887 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004888
4889 if (cnt >= sizeof(buf))
4890 return -EINVAL;
4891
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004892 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004893 return -EFAULT;
4894
Steven Rostedta8dd2172013-01-09 20:54:17 -05004895 buf[cnt] = 0;
4896
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004897 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004898 if (ret < 0)
4899 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004900
Jiri Olsacf8517c2009-10-23 19:36:16 -04004901 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004902
4903 return cnt;
4904}
4905
Li Zefanfdb372e2009-12-08 11:15:59 +08004906static int tracing_trace_options_open(struct inode *inode, struct file *file)
4907{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004908 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004909 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004910
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004911 ret = tracing_check_open_get_tr(tr);
4912 if (ret)
4913 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004914
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004915 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4916 if (ret < 0)
4917 trace_array_put(tr);
4918
4919 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004920}
4921
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004922static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004923 .open = tracing_trace_options_open,
4924 .read = seq_read,
4925 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004926 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004927 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004928};
4929
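/*
 * Help text exposed to userspace as the "README" file in tracefs; a
 * quick way to view it (mount point may vary):
 *
 *   cat /sys/kernel/tracing/README
 */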
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004930static const char readme_msg[] =
4931 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004932 "# echo 0 > tracing_on : quick way to disable tracing\n"
4933 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4934 " Important files:\n"
4935 " trace\t\t\t- The static contents of the buffer\n"
4936	"\t\t\t To clear the buffer, write into this file: echo > trace\n"
4937 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4938 " current_tracer\t- function and latency tracers\n"
4939 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05004940 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004941 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4942 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4943	"  trace_clock\t\t- change the clock used to order events\n"
4944 " local: Per cpu clock but may not be synced across CPUs\n"
4945 " global: Synced across CPUs but slows tracing down.\n"
4946 " counter: Not a clock, but just an increment\n"
4947 " uptime: Jiffy counter from time of boot\n"
4948 " perf: Same clock that perf events use\n"
4949#ifdef CONFIG_X86_64
4950 " x86-tsc: TSC cycle counter\n"
4951#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004952	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
4953 " delta: Delta difference against a buffer-wide timestamp\n"
4954 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004955	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004956	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004957 " tracing_cpumask\t- Limit which CPUs to trace\n"
4958 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4959 "\t\t\t Remove sub-buffer with rmdir\n"
4960 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08004961 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004962 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004963	"  saved_cmdlines_size\t- echo the number of comm-pid pairs to cache in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004964#ifdef CONFIG_DYNAMIC_FTRACE
4965 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004966 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4967 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004968 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004969 "\t modules: Can select a group via module\n"
4970 "\t Format: :mod:<module-name>\n"
4971 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4972 "\t triggers: a command to perform when function is hit\n"
4973 "\t Format: <function>:<trigger>[:count]\n"
4974 "\t trigger: traceon, traceoff\n"
4975 "\t\t enable_event:<system>:<event>\n"
4976 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004977#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004978 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004979#endif
4980#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004981 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004982#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004983 "\t\t dump\n"
4984 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004985 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4986 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4987 "\t The first one will disable tracing every time do_fault is hit\n"
4988 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4989	"\t    The first time do_trap is hit and it disables tracing, the\n"
4990 "\t counter will decrement to 2. If tracing is already disabled,\n"
4991 "\t the counter will not decrement. It only decrements when the\n"
4992 "\t trigger did work\n"
4993 "\t To remove trigger without count:\n"
4994	"\t   echo '!<function>:<trigger>' > set_ftrace_filter\n"
4995	"\t    To remove trigger with a count:\n"
4996	"\t     echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004997 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004998 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4999 "\t modules: Can select a group via module command :mod:\n"
5000 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005001#endif /* CONFIG_DYNAMIC_FTRACE */
5002#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005003 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5004 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005005#endif
5006#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5007 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005008 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005009 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5010#endif
5011#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005012 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5013 "\t\t\t snapshot buffer. Read the contents for more\n"
5014 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005015#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005016#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005017 " stack_trace\t\t- Shows the max stack trace when active\n"
5018 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005019 "\t\t\t Write into this file to reset the max size (trigger a\n"
5020 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005021#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005022 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5023 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005024#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005025#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005026#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005027 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005028 "\t\t\t Write into this file to define/undefine new trace events.\n"
5029#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005030#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005031 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005032 "\t\t\t Write into this file to define/undefine new trace events.\n"
5033#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005034#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005035 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005036 "\t\t\t Write into this file to define/undefine new trace events.\n"
5037#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005038#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005039 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005040 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5041 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005042#ifdef CONFIG_HIST_TRIGGERS
5043 "\t s:[synthetic/]<event> <field> [<field>]\n"
5044#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005045 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005046#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005047 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05305048	"\t   place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005049#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005050#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05305051	"\t   place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005052#endif
5053 "\t args: <name>=fetcharg[:type]\n"
5054 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005055#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005056 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005057#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005058 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005059#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005060 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005061 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005062 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005063 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005064#ifdef CONFIG_HIST_TRIGGERS
5065 "\t field: <stype> <name>;\n"
5066 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5067 "\t [unsigned] char/int/long\n"
5068#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005069#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005070 " events/\t\t- Directory containing all trace event subsystems:\n"
5071 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5072 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005073 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5074 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005075 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005076 " events/<system>/<event>/\t- Directory containing control files for\n"
5077 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005078 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5079 " filter\t\t- If set, only events passing filter are traced\n"
5080 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005081 "\t Format: <trigger>[:count][if <filter>]\n"
5082 "\t trigger: traceon, traceoff\n"
5083 "\t enable_event:<system>:<event>\n"
5084 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005085#ifdef CONFIG_HIST_TRIGGERS
5086 "\t enable_hist:<system>:<event>\n"
5087 "\t disable_hist:<system>:<event>\n"
5088#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005089#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005090 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005091#endif
5092#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005093 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005094#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005095#ifdef CONFIG_HIST_TRIGGERS
5096 "\t\t hist (see below)\n"
5097#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005098 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5099 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5100 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5101 "\t events/block/block_unplug/trigger\n"
5102 "\t The first disables tracing every time block_unplug is hit.\n"
5103 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5104 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5105 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5106 "\t Like function triggers, the counter is only decremented if it\n"
5107 "\t enabled or disabled tracing.\n"
5108 "\t To remove a trigger without a count:\n"
5109	"\t    echo '!<trigger>' > <system>/<event>/trigger\n"
5110	"\t    To remove a trigger with a count:\n"
5111	"\t    echo '!<trigger>:0' > <system>/<event>/trigger\n"
5112 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005113#ifdef CONFIG_HIST_TRIGGERS
5114 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005115 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005116 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005117 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005118 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005119 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005120 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005121 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005122 "\t [if <filter>]\n\n"
5123 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005124 "\t table using the key(s) and value(s) named, and the value of a\n"
5125 "\t sum called 'hitcount' is incremented. Keys and values\n"
5126 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005127 "\t can be any field, or the special string 'stacktrace'.\n"
5128 "\t Compound keys consisting of up to two fields can be specified\n"
5129 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5130 "\t fields. Sort keys consisting of up to two fields can be\n"
5131 "\t specified using the 'sort' keyword. The sort direction can\n"
5132 "\t be modified by appending '.descending' or '.ascending' to a\n"
5133 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005134 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5135 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5136 "\t its histogram data will be shared with other triggers of the\n"
5137 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005138 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005139 "\t table in its entirety to stdout. If there are multiple hist\n"
5140 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005141 "\t trigger in the output. The table displayed for a named\n"
5142 "\t trigger will be the same as any other instance having the\n"
5143 "\t same name. The default format used to display a given field\n"
5144 "\t can be modified by appending any of the following modifiers\n"
5145 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005146 "\t .hex display a number as a hex value\n"
5147 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005148 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005149 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005150 "\t .syscall display a syscall id as a syscall name\n"
5151 "\t .log2 display log2 value rather than raw number\n"
5152 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005153 "\t The 'pause' parameter can be used to pause an existing hist\n"
5154 "\t trigger or to start a hist trigger but not log any events\n"
5155 "\t until told to do so. 'continue' can be used to start or\n"
5156 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005157 "\t The 'clear' parameter will clear the contents of a running\n"
5158 "\t hist trigger and leave its current paused/active state\n"
5159 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005160 "\t The enable_hist and disable_hist triggers can be used to\n"
5161 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005162 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005163 "\t the enable_event and disable_event triggers.\n\n"
5164 "\t Hist trigger handlers and actions are executed whenever a\n"
5165	"\t    histogram entry is added or updated.  They take the form:\n\n"
5166 "\t <handler>.<action>\n\n"
5167 "\t The available handlers are:\n\n"
5168 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005169 "\t onmax(var) - invoke if var exceeds current max\n"
5170 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005171 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005172 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005173 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005174#ifdef CONFIG_TRACER_SNAPSHOT
5175 "\t snapshot() - snapshot the trace buffer\n"
5176#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005177#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005178;
5179
5180static ssize_t
5181tracing_readme_read(struct file *filp, char __user *ubuf,
5182 size_t cnt, loff_t *ppos)
5183{
5184 return simple_read_from_buffer(ubuf, cnt, ppos,
5185 readme_msg, strlen(readme_msg));
5186}
5187
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005188static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005189 .open = tracing_open_generic,
5190 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005191 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005192};
5193
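/*
 * seq_file iterator for the "saved_tgids" file.  tgid_map, allocated in
 * set_tracer_flag() when TRACE_ITER_RECORD_TGID is first enabled, is
 * indexed by pid; the iterator skips pids with no recorded tgid and
 * saved_tgids_show() emits "<pid> <tgid>" pairs.  Usage sketch:
 *
 *   echo record-tgid > trace_options
 *   cat saved_tgids
 */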
Michael Sartain99c621d2017-07-05 22:07:15 -06005194static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5195{
5196 int *ptr = v;
5197
5198 if (*pos || m->count)
5199 ptr++;
5200
5201 (*pos)++;
5202
5203 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5204 if (trace_find_tgid(*ptr))
5205 return ptr;
5206 }
5207
5208 return NULL;
5209}
5210
5211static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5212{
5213 void *v;
5214 loff_t l = 0;
5215
5216 if (!tgid_map)
5217 return NULL;
5218
5219 v = &tgid_map[0];
5220 while (l <= *pos) {
5221 v = saved_tgids_next(m, v, &l);
5222 if (!v)
5223 return NULL;
5224 }
5225
5226 return v;
5227}
5228
5229static void saved_tgids_stop(struct seq_file *m, void *v)
5230{
5231}
5232
5233static int saved_tgids_show(struct seq_file *m, void *v)
5234{
5235 int pid = (int *)v - tgid_map;
5236
5237 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5238 return 0;
5239}
5240
5241static const struct seq_operations tracing_saved_tgids_seq_ops = {
5242 .start = saved_tgids_start,
5243 .stop = saved_tgids_stop,
5244 .next = saved_tgids_next,
5245 .show = saved_tgids_show,
5246};
5247
5248static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5249{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005250 int ret;
5251
5252 ret = tracing_check_open_get_tr(NULL);
5253 if (ret)
5254 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005255
5256 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5257}
5258
5259
5260static const struct file_operations tracing_saved_tgids_fops = {
5261 .open = tracing_saved_tgids_open,
5262 .read = seq_read,
5263 .llseek = seq_lseek,
5264 .release = seq_release,
5265};
5266
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005267static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005268{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005269 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005270
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005271 if (*pos || m->count)
5272 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005273
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005274 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005275
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005276 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5277 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005278 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005279 continue;
5280
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005281 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005282 }
5283
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005284 return NULL;
5285}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005286
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005287static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5288{
5289 void *v;
5290 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005291
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005292 preempt_disable();
5293 arch_spin_lock(&trace_cmdline_lock);
5294
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005295 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005296 while (l <= *pos) {
5297 v = saved_cmdlines_next(m, v, &l);
5298 if (!v)
5299 return NULL;
5300 }
5301
5302 return v;
5303}
5304
5305static void saved_cmdlines_stop(struct seq_file *m, void *v)
5306{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005307 arch_spin_unlock(&trace_cmdline_lock);
5308 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005309}
5310
5311static int saved_cmdlines_show(struct seq_file *m, void *v)
5312{
5313 char buf[TASK_COMM_LEN];
5314 unsigned int *pid = v;
5315
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005316 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005317 seq_printf(m, "%d %s\n", *pid, buf);
5318 return 0;
5319}
5320
5321static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5322 .start = saved_cmdlines_start,
5323 .next = saved_cmdlines_next,
5324 .stop = saved_cmdlines_stop,
5325 .show = saved_cmdlines_show,
5326};
5327
5328static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5329{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005330 int ret;
5331
5332 ret = tracing_check_open_get_tr(NULL);
5333 if (ret)
5334 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005335
5336 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005337}
5338
5339static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005340 .open = tracing_saved_cmdlines_open,
5341 .read = seq_read,
5342 .llseek = seq_lseek,
5343 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005344};
5345
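/*
 * "saved_cmdlines_size" reports the capacity of the pid->comm cache on
 * read and accepts a new size on write, e.g. (usage sketch):
 *
 *   echo 1024 > saved_cmdlines_size
 *
 * which reallocates the cache via tracing_resize_saved_cmdlines() below.
 */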
5346static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005347tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5348 size_t cnt, loff_t *ppos)
5349{
5350 char buf[64];
5351 int r;
5352
5353 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005354 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005355 arch_spin_unlock(&trace_cmdline_lock);
5356
5357 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5358}
5359
5360static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5361{
5362 kfree(s->saved_cmdlines);
5363 kfree(s->map_cmdline_to_pid);
5364 kfree(s);
5365}
5366
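/*
 * Resize by allocate-and-swap: build the new buffer up front, publish
 * it under trace_cmdline_lock, and free the old one only after the
 * swap.  Readers therefore always see either the old or the new table,
 * never a partially initialized one.
 */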
5367static int tracing_resize_saved_cmdlines(unsigned int val)
5368{
5369 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5370
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005371 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005372 if (!s)
5373 return -ENOMEM;
5374
5375 if (allocate_cmdlines_buffer(val, s) < 0) {
5376 kfree(s);
5377 return -ENOMEM;
5378 }
5379
5380 arch_spin_lock(&trace_cmdline_lock);
5381 savedcmd_temp = savedcmd;
5382 savedcmd = s;
5383 arch_spin_unlock(&trace_cmdline_lock);
5384 free_saved_cmdlines_buffer(savedcmd_temp);
5385
5386 return 0;
5387}
5388
5389static ssize_t
5390tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5391 size_t cnt, loff_t *ppos)
5392{
5393 unsigned long val;
5394 int ret;
5395
5396 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5397 if (ret)
5398 return ret;
5399
5400	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5401 if (!val || val > PID_MAX_DEFAULT)
5402 return -EINVAL;
5403
5404 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5405 if (ret < 0)
5406 return ret;
5407
5408 *ppos += cnt;
5409
5410 return cnt;
5411}
5412
5413static const struct file_operations tracing_saved_cmdlines_size_fops = {
5414 .open = tracing_open_generic,
5415 .read = tracing_saved_cmdlines_size_read,
5416 .write = tracing_saved_cmdlines_size_write,
5417};
5418
Jeremy Linton681bec02017-05-31 16:56:53 -05005419#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005420static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005421update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005422{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005423 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005424 if (ptr->tail.next) {
5425 ptr = ptr->tail.next;
5426 /* Set ptr to the next real item (skip head) */
5427 ptr++;
5428 } else
5429 return NULL;
5430 }
5431 return ptr;
5432}
5433
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005434static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005435{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005436 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005437
5438 /*
5439 * Paranoid! If ptr points to end, we don't want to increment past it.
5440 * This really should never happen.
5441 */
Vasily Averin039958a2020-01-24 10:03:01 +03005442 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005443 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005444 if (WARN_ON_ONCE(!ptr))
5445 return NULL;
5446
5447 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005448 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005449
5450 return ptr;
5451}
5452
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005453static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005454{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005455 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005456 loff_t l = 0;
5457
Jeremy Linton1793ed92017-05-31 16:56:46 -05005458 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005459
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005460 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005461 if (v)
5462 v++;
5463
5464 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005465 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005466 }
5467
5468 return v;
5469}
5470
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005471static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005472{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005473 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005474}
5475
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005476static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005477{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005478 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005479
5480 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005481 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005482 ptr->map.system);
5483
5484 return 0;
5485}
5486
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005487static const struct seq_operations tracing_eval_map_seq_ops = {
5488 .start = eval_map_start,
5489 .next = eval_map_next,
5490 .stop = eval_map_stop,
5491 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005492};
5493
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005494static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005495{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005496 int ret;
5497
5498 ret = tracing_check_open_get_tr(NULL);
5499 if (ret)
5500 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005501
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005502 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005503}
5504
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005505static const struct file_operations tracing_eval_map_fops = {
5506 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005507 .read = seq_read,
5508 .llseek = seq_lseek,
5509 .release = seq_release,
5510};
5511
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005512static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005513trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005514{
5515 /* Return tail of array given the head */
5516 return ptr + ptr->head.length + 1;
5517}
5518
5519static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005520trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005521 int len)
5522{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005523 struct trace_eval_map **stop;
5524 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005525 union trace_eval_map_item *map_array;
5526 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005527
5528 stop = start + len;
5529
5530 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005531 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005532 * where the head holds the module and length of array, and the
5533 * tail holds a pointer to the next list.
5534 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005535 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005536 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005537 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005538 return;
5539 }
5540
Jeremy Linton1793ed92017-05-31 16:56:46 -05005541 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005542
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005543 if (!trace_eval_maps)
5544 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005545 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005546 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005547 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005548 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005549 if (!ptr->tail.next)
5550 break;
5551 ptr = ptr->tail.next;
5552
5553 }
5554 ptr->tail.next = map_array;
5555 }
5556 map_array->head.mod = mod;
5557 map_array->head.length = len;
5558 map_array++;
5559
5560 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5561 map_array->map = **map;
5562 map_array++;
5563 }
5564 memset(map_array, 0, sizeof(*map_array));
5565
Jeremy Linton1793ed92017-05-31 16:56:46 -05005566 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005567}
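/*
 * Illustrative layout for len == 2 (head + len maps + terminating tail,
 * hence the kmalloc_array(len + 2, ...) above):
 *
 *   map_array[0]  head:  { .mod = mod, .length = 2 }
 *   map_array[1]  map:   copy of the first trace_eval_map
 *   map_array[2]  map:   copy of the second trace_eval_map
 *   map_array[3]  tail:  zeroed; .tail.next is filled in later when the
 *                        next module's array is chained on
 */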
5568
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005569static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005570{
Jeremy Linton681bec02017-05-31 16:56:53 -05005571 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005572 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005573}
5574
Jeremy Linton681bec02017-05-31 16:56:53 -05005575#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005576static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5577static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005578 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005579#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005580
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005581static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005582 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005583{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005584 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005585
5586 if (len <= 0)
5587 return;
5588
5589 map = start;
5590
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005591 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005592
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005593 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005594}
5595
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005596static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005597tracing_set_trace_read(struct file *filp, char __user *ubuf,
5598 size_t cnt, loff_t *ppos)
5599{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005600 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005601 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005602 int r;
5603
5604 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005605 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005606 mutex_unlock(&trace_types_lock);
5607
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005608 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005609}
5610
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005611int tracer_init(struct tracer *t, struct trace_array *tr)
5612{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005613 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005614 return t->init(tr);
5615}
5616
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005617static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005618{
5619 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005620
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005621 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005622 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005623}
5624
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005625#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005626/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005627static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5628 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005629{
5630 int cpu, ret = 0;
5631
5632 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5633 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005634 ret = ring_buffer_resize(trace_buf->buffer,
5635 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005636 if (ret < 0)
5637 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005638 per_cpu_ptr(trace_buf->data, cpu)->entries =
5639 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005640 }
5641 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005642 ret = ring_buffer_resize(trace_buf->buffer,
5643 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005644 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005645 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5646 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005647 }
5648
5649 return ret;
5650}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005651#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005652
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005653static int __tracing_resize_ring_buffer(struct trace_array *tr,
5654 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005655{
5656 int ret;
5657
5658 /*
5659 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005660 * we use the size that was given, and we can forget about
5661 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005662 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005663 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005664
Steven Rostedtb382ede62012-10-10 21:44:34 -04005665 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005666 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005667 return 0;
5668
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005669 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005670 if (ret < 0)
5671 return ret;
5672
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005673#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005674 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5675 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005676 goto out;
5677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005678 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005679 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005680 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5681 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005682 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005683 /*
5684 * AARGH! We are left with different
5685 * size max buffer!!!!
5686 * The max buffer is our "snapshot" buffer.
5687 * When a tracer needs a snapshot (one of the
5688 * latency tracers), it swaps the max buffer
5689			 * with the saved snapshot. We succeeded in
5690			 * updating the size of the main buffer, but failed to
5691 * update the size of the max buffer. But when we tried
5692 * to reset the main buffer to the original size, we
5693 * failed there too. This is very unlikely to
5694 * happen, but if it does, warn and kill all
5695 * tracing.
5696 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005697 WARN_ON(1);
5698 tracing_disabled = 1;
5699 }
5700 return ret;
5701 }
5702
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005703 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005704 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005705 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005706 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005707
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005708 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005709#endif /* CONFIG_TRACER_MAX_TRACE */
5710
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005711 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005712 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005713 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005714 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005715
5716 return ret;
5717}
5718
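/*
 * Entry point for user-requested resizes.  Writing to buffer_size_kb
 * arrives here with cpu_id == RING_BUFFER_ALL_CPUS, while the per-CPU
 * per_cpu/cpuN/buffer_size_kb files pass the individual CPU.  Usage
 * sketch (size in KB, tracefs mount point may vary):
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 */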
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005719ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5720 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005721{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005722 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005723
5724 mutex_lock(&trace_types_lock);
5725
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005726 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5727 /* make sure, this cpu is enabled in the mask */
5728 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5729 ret = -EINVAL;
5730 goto out;
5731 }
5732 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005733
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005734 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005735 if (ret < 0)
5736 ret = -ENOMEM;
5737
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005738out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005739 mutex_unlock(&trace_types_lock);
5740
5741 return ret;
5742}
5743
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005744
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005745/**
5746 * tracing_update_buffers - used by tracing facility to expand ring buffers
5747 *
5748 * To save memory when tracing is never used on a system that has it
5749 * configured in, the ring buffers are set to a minimum size. Once a
5750 * user starts to use the tracing facility, they need to grow to
5751 * their default size.
5752 *
5753 * This function is to be called when a tracer is about to be used.
5754 */
5755int tracing_update_buffers(void)
5756{
5757 int ret = 0;
5758
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005759 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005760 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005761 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005762 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005763 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005764
5765 return ret;
5766}
5767
Steven Rostedt577b7852009-02-26 23:43:05 -05005768struct trace_option_dentry;
5769
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005770static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005771create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005772
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005773/*
5774 * Used to clear out the tracer before deletion of an instance.
5775 * Must have trace_types_lock held.
5776 */
5777static void tracing_set_nop(struct trace_array *tr)
5778{
5779 if (tr->current_trace == &nop_trace)
5780 return;
5781
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005782 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005783
5784 if (tr->current_trace->reset)
5785 tr->current_trace->reset(tr);
5786
5787 tr->current_trace = &nop_trace;
5788}
5789
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005790static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005791{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005792 /* Only enable if the directory has been created already. */
5793 if (!tr->dir)
5794 return;
5795
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005796 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005797}
5798
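/*
 * Switch @tr to the tracer named @buf.  This is the backend of the
 * "current_tracer" file, e.g. (usage sketch):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *
 * The old tracer is torn down (reset, unused snapshot freed) before the
 * new tracer's init() runs.
 */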
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005799int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005800{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005801 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005802#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005803 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005804#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005805 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005806
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005807 mutex_lock(&trace_types_lock);
5808
Steven Rostedt73c51622009-03-11 13:42:01 -04005809 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005810 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005811 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005812 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005813 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005814 ret = 0;
5815 }
5816
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005817 for (t = trace_types; t; t = t->next) {
5818 if (strcmp(t->name, buf) == 0)
5819 break;
5820 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005821 if (!t) {
5822 ret = -EINVAL;
5823 goto out;
5824 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005825 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005826 goto out;
5827
Tom Zanussia35873a2019-02-13 17:42:45 -06005828#ifdef CONFIG_TRACER_SNAPSHOT
5829 if (t->use_max_tr) {
5830 arch_spin_lock(&tr->max_lock);
5831 if (tr->cond_snapshot)
5832 ret = -EBUSY;
5833 arch_spin_unlock(&tr->max_lock);
5834 if (ret)
5835 goto out;
5836 }
5837#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005838	/* Some tracers won't work if selected on the kernel command line */
5839 if (system_state < SYSTEM_RUNNING && t->noboot) {
5840 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5841 t->name);
5842 goto out;
5843 }
5844
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005845 /* Some tracers are only allowed for the top level buffer */
5846 if (!trace_ok_for_array(t, tr)) {
5847 ret = -EINVAL;
5848 goto out;
5849 }
5850
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005851 /* If trace pipe files are being read, we can't change the tracer */
5852 if (tr->current_trace->ref) {
5853 ret = -EBUSY;
5854 goto out;
5855 }
5856
Steven Rostedt9f029e82008-11-12 15:24:24 -05005857 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005858
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005859 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005860
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005861 if (tr->current_trace->reset)
5862 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005863
Paul E. McKenney74401722018-11-06 18:44:52 -08005864 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005865 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005866
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005867#ifdef CONFIG_TRACER_MAX_TRACE
5868 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005869
5870 if (had_max_tr && !t->use_max_tr) {
5871 /*
5872		 * We need to make sure that update_max_tr() sees that
5873		 * current_trace changed to nop_trace to keep it from
5874		 * swapping the buffers after we resize it.
5875		 * update_max_tr() is called with interrupts disabled,
5876		 * so a synchronize_rcu() is sufficient.
5877 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005878 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005879 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005880 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005881#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005882
5883#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005884 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005885 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005886 if (ret < 0)
5887 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005888 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005889#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005890
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005891 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005892 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005893 if (ret)
5894 goto out;
5895 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005896
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005897 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005898 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005899 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005900 out:
5901 mutex_unlock(&trace_types_lock);
5902
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005903 return ret;
5904}
5905
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005906static ssize_t
5907tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5908 size_t cnt, loff_t *ppos)
5909{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005910 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005911 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005912 int i;
5913 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005914 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005915
Steven Rostedt60063a62008-10-28 10:44:24 -04005916 ret = cnt;
5917
Li Zefanee6c2c12009-09-18 14:06:47 +08005918 if (cnt > MAX_TRACER_SIZE)
5919 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005920
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005921 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005922 return -EFAULT;
5923
5924 buf[cnt] = 0;
5925
5926 /* strip ending whitespace. */
5927 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5928 buf[i] = 0;
5929
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005930 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005931 if (err)
5932 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005933
Jiri Olsacf8517c2009-10-23 19:36:16 -04005934 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005935
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005936 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005937}
5938
5939static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005940tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5941 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005942{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005943 char buf[64];
5944 int r;
5945
Steven Rostedtcffae432008-05-12 21:21:00 +02005946 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005947 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005948 if (r > sizeof(buf))
5949 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005950 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005951}
5952
5953static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005954tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5955 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005956{
Hannes Eder5e398412009-02-10 19:44:34 +01005957 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005958 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005959
Peter Huewe22fe9b52011-06-07 21:58:27 +02005960 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5961 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005962 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005963
5964 *ptr = val * 1000;
5965
5966 return cnt;
5967}
5968
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005969static ssize_t
5970tracing_thresh_read(struct file *filp, char __user *ubuf,
5971 size_t cnt, loff_t *ppos)
5972{
5973 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5974}
5975
5976static ssize_t
5977tracing_thresh_write(struct file *filp, const char __user *ubuf,
5978 size_t cnt, loff_t *ppos)
5979{
5980 struct trace_array *tr = filp->private_data;
5981 int ret;
5982
5983 mutex_lock(&trace_types_lock);
5984 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5985 if (ret < 0)
5986 goto out;
5987
5988 if (tr->current_trace->update_thresh) {
5989 ret = tr->current_trace->update_thresh(tr);
5990 if (ret < 0)
5991 goto out;
5992 }
5993
5994 ret = cnt;
5995out:
5996 mutex_unlock(&trace_types_lock);
5997
5998 return ret;
5999}
6000
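/*
 * Usage sketch for the tracing_thresh file served by the handlers
 * above (path assumes the usual tracefs mount point):
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * sets the threshold to 100 usecs. Tracers that provide an
 * update_thresh() callback get to react to the new value while
 * trace_types_lock is held.
 */
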
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006001#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006002
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006003static ssize_t
6004tracing_max_lat_read(struct file *filp, char __user *ubuf,
6005 size_t cnt, loff_t *ppos)
6006{
6007 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6008}
6009
6010static ssize_t
6011tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6012 size_t cnt, loff_t *ppos)
6013{
6014 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6015}
6016
Chen Gange428abb2015-11-10 05:15:15 +08006017#endif
6018
Steven Rostedtb3806b42008-05-12 21:20:46 +02006019static int tracing_open_pipe(struct inode *inode, struct file *filp)
6020{
Oleg Nesterov15544202013-07-23 17:25:57 +02006021 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006022 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006023 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006024
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006025 ret = tracing_check_open_get_tr(tr);
6026 if (ret)
6027 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006028
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006029 mutex_lock(&trace_types_lock);
6030
Steven Rostedtb3806b42008-05-12 21:20:46 +02006031 /* create a buffer to store the information to pass to userspace */
6032 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006033 if (!iter) {
6034 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006035 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006036 goto out;
6037 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006038
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006039 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006040 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006041
6042 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6043 ret = -ENOMEM;
6044 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306045 }
6046
Steven Rostedta3097202008-11-07 22:36:02 -05006047 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306048 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006049
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006050 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006051 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6052
David Sharp8be07092012-11-13 12:18:22 -08006053 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006054 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006055 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6056
Oleg Nesterov15544202013-07-23 17:25:57 +02006057 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006058 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006059 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006060 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006061 filp->private_data = iter;
6062
Steven Rostedt107bad82008-05-12 21:21:01 +02006063 if (iter->trace->pipe_open)
6064 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006065
Arnd Bergmannb4447862010-07-07 23:40:11 +02006066 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006067
6068 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006069out:
6070 mutex_unlock(&trace_types_lock);
6071 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006072
6073fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006074 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006075 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006076 mutex_unlock(&trace_types_lock);
6077 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006078}
6079
6080static int tracing_release_pipe(struct inode *inode, struct file *file)
6081{
6082 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006083 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006084
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006085 mutex_lock(&trace_types_lock);
6086
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006087 tr->current_trace->ref--;
6088
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006089 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006090 iter->trace->pipe_close(iter);
6091
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006092 mutex_unlock(&trace_types_lock);
6093
Rusty Russell44623442009-01-01 10:12:23 +10306094 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006095 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006096 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006097
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006098 trace_array_put(tr);
6099
Steven Rostedtb3806b42008-05-12 21:20:46 +02006100 return 0;
6101}
6102
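/*
 * Lifecycle summary for the two functions above: opening trace_pipe
 * bumps tr->current_trace->ref, which keeps the current tracer from
 * being removed while a reader exists, and tracing_release_pipe()
 * drops that reference again when the descriptor is closed.
 */
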
Al Viro9dd95742017-07-03 00:42:43 -04006103static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006104trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006105{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006106 struct trace_array *tr = iter->tr;
6107
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006108 /* Iterators are static, they should be filled or empty */
6109 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006110 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006111
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006112 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006113 /*
6114 * Always select as readable when in blocking mode
6115 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006116 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006117 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006118 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006119 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006120}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006121
Al Viro9dd95742017-07-03 00:42:43 -04006122static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006123tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6124{
6125 struct trace_iterator *iter = filp->private_data;
6126
6127 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006128}
6129
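/*
 * A reader may therefore sleep in poll(2)/select(2)/epoll(7) on a
 * trace_pipe descriptor and be woken when data arrives; with
 * TRACE_ITER_BLOCK set the descriptor always reports readable and
 * the blocking happens in the read itself.
 */
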
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006130/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006131static int tracing_wait_pipe(struct file *filp)
6132{
6133 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006134 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006135
6136 while (trace_empty(iter)) {
6137
6138		if (filp->f_flags & O_NONBLOCK)
6139			return -EAGAIN;
6141
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006142 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006143		 * We stop blocking only once we have read something and
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006144		 * tracing has been disabled. We still block if tracing is
6145		 * disabled but we have never read anything. This allows a
6146		 * user to cat this file, and then enable tracing. But once
6147		 * we have read something, we give an EOF when tracing is disabled again.
6148 *
6149 * iter->pos will be 0 if we haven't read anything.
6150 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006151 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006152 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006153
6154 mutex_unlock(&iter->mutex);
6155
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006156 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006157
6158 mutex_lock(&iter->mutex);
6159
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006160 if (ret)
6161 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006162 }
6163
6164 return 1;
6165}
6166
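/*
 * tracing_wait_pipe() returns 1 when the caller should attempt a
 * read (including the EOF case, which the caller detects with
 * trace_empty()), -EAGAIN for non-blocking descriptors, and a
 * negative error if the wait itself was interrupted.
 */
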
Steven Rostedtb3806b42008-05-12 21:20:46 +02006167/*
6168 * Consumer reader.
6169 */
6170static ssize_t
6171tracing_read_pipe(struct file *filp, char __user *ubuf,
6172 size_t cnt, loff_t *ppos)
6173{
6174 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006175 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006176
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006177 /*
6178 * Avoid more than one consumer on a single file descriptor
6179 * This is just a matter of traces coherency, the ring buffer itself
6180 * is protected.
6181 */
6182 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006183
6184 /* return any leftover data */
6185 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6186 if (sret != -EBUSY)
6187 goto out;
6188
6189 trace_seq_init(&iter->seq);
6190
Steven Rostedt107bad82008-05-12 21:21:01 +02006191 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006192 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6193 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006194 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006195 }
6196
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006197waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006198 sret = tracing_wait_pipe(filp);
6199 if (sret <= 0)
6200 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006201
6202 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006203 if (trace_empty(iter)) {
6204 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006205 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006206 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006207
6208 if (cnt >= PAGE_SIZE)
6209 cnt = PAGE_SIZE - 1;
6210
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006211 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006212 memset(&iter->seq, 0,
6213 sizeof(struct trace_iterator) -
6214 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006215 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006216 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006217 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006218
Lai Jiangshan4f535962009-05-18 19:35:34 +08006219 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006220 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006221 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006222 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006223 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006224
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006225 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006226 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006227 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006228 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006229 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006230 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006231 if (ret != TRACE_TYPE_NO_CONSUME)
6232 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006233
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006234 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006235 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006236
6237 /*
6238 * Setting the full flag means we reached the trace_seq buffer
6239		 * size, so we should have left via the partial-line condition above.
6240		 * If we get here, one of the trace_seq_* functions was not used properly.
6241 */
6242 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6243 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006244 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006245 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006246 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006247
Steven Rostedtb3806b42008-05-12 21:20:46 +02006248 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006249 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006250 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006251 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006252
6253 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006254 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006255 * entries, go back to wait for more entries.
6256 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006257 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006258 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006259
Steven Rostedt107bad82008-05-12 21:21:01 +02006260out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006261 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006262
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006263 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006264}
6265
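/*
 * Note that the read above is destructive: every event copied to
 * userspace is removed from the ring buffer by trace_consume(), and
 * rendered text that did not fit into the user buffer stays in
 * iter->seq to be returned by the next read.
 */
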
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006266static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6267 unsigned int idx)
6268{
6269 __free_page(spd->pages[idx]);
6270}
6271
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006272static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006273 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006274 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006275 .steal = generic_pipe_buf_steal,
6276 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006277};
6278
Steven Rostedt34cd4992009-02-09 12:06:29 -05006279static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006280tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006281{
6282 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006283 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006284 int ret;
6285
6286 /* Seq buffer is page-sized, exactly what we need. */
6287 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006288 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006289 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006290
6291 if (trace_seq_has_overflowed(&iter->seq)) {
6292 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006293 break;
6294 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006295
6296 /*
6297		 * This should not be hit, because a partial line should only
6298		 * be returned when iter->seq has overflowed, and that is
6299		 * checked above. But check it anyway to be safe.
6300 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006301 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006302 iter->seq.seq.len = save_len;
6303 break;
6304 }
6305
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006306 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006307 if (rem < count) {
6308 rem = 0;
6309 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006310 break;
6311 }
6312
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006313 if (ret != TRACE_TYPE_NO_CONSUME)
6314 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006315 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006316 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006317 rem = 0;
6318 iter->ent = NULL;
6319 break;
6320 }
6321 }
6322
6323 return rem;
6324}
6325
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006326static ssize_t tracing_splice_read_pipe(struct file *filp,
6327 loff_t *ppos,
6328 struct pipe_inode_info *pipe,
6329 size_t len,
6330 unsigned int flags)
6331{
Jens Axboe35f3d142010-05-20 10:43:18 +02006332 struct page *pages_def[PIPE_DEF_BUFFERS];
6333 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006334 struct trace_iterator *iter = filp->private_data;
6335 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006336 .pages = pages_def,
6337 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006338 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006339 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006340 .ops = &tracing_pipe_buf_ops,
6341 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006342 };
6343 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006344 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006345 unsigned int i;
6346
Jens Axboe35f3d142010-05-20 10:43:18 +02006347 if (splice_grow_spd(pipe, &spd))
6348 return -ENOMEM;
6349
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006350 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006351
6352 if (iter->trace->splice_read) {
6353 ret = iter->trace->splice_read(iter, filp,
6354 ppos, pipe, len, flags);
6355 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006356 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006357 }
6358
6359 ret = tracing_wait_pipe(filp);
6360 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006361 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006362
Jason Wessel955b61e2010-08-05 09:22:23 -05006363 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006364 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006365 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006366 }
6367
Lai Jiangshan4f535962009-05-18 19:35:34 +08006368 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006369 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006370
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006371 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006372 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006373 spd.pages[i] = alloc_page(GFP_KERNEL);
6374 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006375 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006376
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006377 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006378
6379 /* Copy the data into the page, so we can start over. */
6380 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006381 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006382 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006383 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006384 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006385 break;
6386 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006387 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006388 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006389
Steven Rostedtf9520752009-03-02 14:04:40 -05006390 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006391 }
6392
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006393 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006394 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006395 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006396
6397 spd.nr_pages = i;
6398
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006399 if (i)
6400 ret = splice_to_pipe(pipe, &spd);
6401 else
6402 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006403out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006404 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006405 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006406
Steven Rostedt34cd4992009-02-09 12:06:29 -05006407out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006408 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006409 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006410}
6411
Steven Rostedta98a3c32008-05-12 21:20:59 +02006412static ssize_t
6413tracing_entries_read(struct file *filp, char __user *ubuf,
6414 size_t cnt, loff_t *ppos)
6415{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006416 struct inode *inode = file_inode(filp);
6417 struct trace_array *tr = inode->i_private;
6418 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006419 char buf[64];
6420 int r = 0;
6421 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006422
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006423 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006424
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006425 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006426 int cpu, buf_size_same;
6427 unsigned long size;
6428
6429 size = 0;
6430 buf_size_same = 1;
6431		/* check if all cpu sizes are the same */
6432 for_each_tracing_cpu(cpu) {
6433 /* fill in the size from first enabled cpu */
6434 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006435 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6436 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006437 buf_size_same = 0;
6438 break;
6439 }
6440 }
6441
6442 if (buf_size_same) {
6443 if (!ring_buffer_expanded)
6444 r = sprintf(buf, "%lu (expanded: %lu)\n",
6445 size >> 10,
6446 trace_buf_size >> 10);
6447 else
6448 r = sprintf(buf, "%lu\n", size >> 10);
6449 } else
6450 r = sprintf(buf, "X\n");
6451 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006452 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006453
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006454 mutex_unlock(&trace_types_lock);
6455
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006456 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6457 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006458}
6459
6460static ssize_t
6461tracing_entries_write(struct file *filp, const char __user *ubuf,
6462 size_t cnt, loff_t *ppos)
6463{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006464 struct inode *inode = file_inode(filp);
6465 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006466 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006467 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006468
Peter Huewe22fe9b52011-06-07 21:58:27 +02006469 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6470 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006471 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006472
6473 /* must have at least 1 entry */
6474 if (!val)
6475 return -EINVAL;
6476
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006477 /* value is in KB */
6478 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006479 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006480 if (ret < 0)
6481 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006482
Jiri Olsacf8517c2009-10-23 19:36:16 -04006483 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006484
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006485 return cnt;
6486}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006487
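/*
 * Usage sketch for the entries handlers above (they back the
 * buffer_size_kb files; path assumes the usual tracefs mount point):
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes each selected per-cpu buffer to 4096 KB. Reads report the
 * size in KB, with an "(expanded: ...)" annotation while the ring
 * buffer still has its minimal boot-time size.
 */
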
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006488static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006489tracing_total_entries_read(struct file *filp, char __user *ubuf,
6490 size_t cnt, loff_t *ppos)
6491{
6492 struct trace_array *tr = filp->private_data;
6493 char buf[64];
6494 int r, cpu;
6495 unsigned long size = 0, expanded_size = 0;
6496
6497 mutex_lock(&trace_types_lock);
6498 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006499 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006500 if (!ring_buffer_expanded)
6501 expanded_size += trace_buf_size >> 10;
6502 }
6503 if (ring_buffer_expanded)
6504 r = sprintf(buf, "%lu\n", size);
6505 else
6506 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6507 mutex_unlock(&trace_types_lock);
6508
6509 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6510}
6511
6512static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006513tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6514 size_t cnt, loff_t *ppos)
6515{
6516 /*
6517	 * There is no need to read what the user has written; this function
6518	 * exists only so that writing with "echo" does not return an error.
6519 */
6520
6521 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006522
6523 return cnt;
6524}
6525
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006526static int
6527tracing_free_buffer_release(struct inode *inode, struct file *filp)
6528{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006529 struct trace_array *tr = inode->i_private;
6530
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006531	/* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006532 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006533 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006534 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006535 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006536
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006537 trace_array_put(tr);
6538
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006539 return 0;
6540}
6541
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006542static ssize_t
6543tracing_mark_write(struct file *filp, const char __user *ubuf,
6544 size_t cnt, loff_t *fpos)
6545{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006546 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006547 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006548 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006549 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006550 struct print_entry *entry;
6551 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006552 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006553 int size;
6554 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006555
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006556/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006557#define FAULTED_STR "<faulted>"
6558#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006559
Steven Rostedtc76f0692008-11-07 22:36:02 -05006560 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006561 return -EINVAL;
6562
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006563 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006564 return -EINVAL;
6565
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006566 if (cnt > TRACE_BUF_SIZE)
6567 cnt = TRACE_BUF_SIZE;
6568
Steven Rostedtd696b582011-09-22 11:50:27 -04006569 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006570
Steven Rostedtd696b582011-09-22 11:50:27 -04006571 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006572 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6573
6574	/* If the write is shorter than "<faulted>", reserve room for that string */
6575 if (cnt < FAULTED_SIZE)
6576 size += FAULTED_SIZE - cnt;
6577
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006578 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006579 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6580 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006581 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006582 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006583 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006584
6585 entry = ring_buffer_event_data(event);
6586 entry->ip = _THIS_IP_;
6587
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006588 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6589 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006590 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006591 cnt = FAULTED_SIZE;
6592 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006593 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006594 written = cnt;
6595 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006596
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006597 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6598 /* do not add \n before testing triggers, but add \0 */
6599 entry->buf[cnt] = '\0';
6600 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6601 }
6602
Steven Rostedtd696b582011-09-22 11:50:27 -04006603 if (entry->buf[cnt - 1] != '\n') {
6604 entry->buf[cnt] = '\n';
6605 entry->buf[cnt + 1] = '\0';
6606 } else
6607 entry->buf[cnt] = '\0';
6608
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006609 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006610
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006611 if (tt)
6612 event_triggers_post_call(tr->trace_marker_file, tt);
6613
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006614 if (written > 0)
6615 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006616
Steven Rostedtfa32e852016-07-06 15:25:08 -04006617 return written;
6618}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006619
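/*
 * Usage sketch for the write handler above (it backs the
 * trace_marker file; path assumes the usual tracefs mount point):
 *
 *   echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * injects a TRACE_PRINT event into the ring buffer. If the copy
 * from userspace faults, the string "<faulted>" is recorded instead
 * and the write returns -EFAULT.
 */
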
Steven Rostedtfa32e852016-07-06 15:25:08 -04006620/* Limit it for now to 3K (including tag) */
6621#define RAW_DATA_MAX_SIZE (1024*3)
6622
6623static ssize_t
6624tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6625 size_t cnt, loff_t *fpos)
6626{
6627 struct trace_array *tr = filp->private_data;
6628 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006629 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006630 struct raw_data_entry *entry;
6631 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006632 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006633 int size;
6634 int len;
6635
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006636#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6637
Steven Rostedtfa32e852016-07-06 15:25:08 -04006638 if (tracing_disabled)
6639 return -EINVAL;
6640
6641 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6642 return -EINVAL;
6643
6644 /* The marker must at least have a tag id */
6645 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6646 return -EINVAL;
6647
6648 if (cnt > TRACE_BUF_SIZE)
6649 cnt = TRACE_BUF_SIZE;
6650
6651 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6652
Steven Rostedtfa32e852016-07-06 15:25:08 -04006653 local_save_flags(irq_flags);
6654 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006655 if (cnt < FAULT_SIZE_ID)
6656 size += FAULT_SIZE_ID - cnt;
6657
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006658 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006659 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6660 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006661 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006662 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006663 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006664
6665 entry = ring_buffer_event_data(event);
6666
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006667 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6668 if (len) {
6669 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006670 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006671 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006672 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006673 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006674
6675 __buffer_unlock_commit(buffer, event);
6676
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006677 if (written > 0)
6678 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006679
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006680 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006681}
6682
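/*
 * The raw variant above takes binary data whose first sizeof(int)
 * bytes are a tag id. A minimal, purely illustrative user could be:
 *
 *   struct { int id; char payload[8]; } rec = { 42, "rawdata" };
 *   int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *   write(fd, &rec, sizeof(rec));
 *
 * which lands in the buffer as a single TRACE_RAW_DATA entry.
 */
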
Li Zefan13f16d22009-12-08 11:16:11 +08006683static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006684{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006685 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006686 int i;
6687
6688 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006689 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006690 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006691 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6692 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006693 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006694
Li Zefan13f16d22009-12-08 11:16:11 +08006695 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006696}
6697
Tom Zanussid71bd342018-01-15 20:52:07 -06006698int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006699{
Zhaolei5079f322009-08-25 16:12:56 +08006700 int i;
6701
Zhaolei5079f322009-08-25 16:12:56 +08006702 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6703 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6704 break;
6705 }
6706 if (i == ARRAY_SIZE(trace_clocks))
6707 return -EINVAL;
6708
Zhaolei5079f322009-08-25 16:12:56 +08006709 mutex_lock(&trace_types_lock);
6710
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006711 tr->clock_id = i;
6712
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006713 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006714
David Sharp60303ed2012-10-11 16:27:52 -07006715 /*
6716 * New clock may not be consistent with the previous clock.
6717 * Reset the buffer so that it doesn't have incomparable timestamps.
6718 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006719 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006720
6721#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006722 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006723 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006724 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006725#endif
David Sharp60303ed2012-10-11 16:27:52 -07006726
Zhaolei5079f322009-08-25 16:12:56 +08006727 mutex_unlock(&trace_types_lock);
6728
Steven Rostedte1e232c2014-02-10 23:38:46 -05006729 return 0;
6730}
6731
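/*
 * Usage sketch for the trace_clock file built on the code above
 * (path assumes the usual tracefs mount point):
 *
 *   cat /sys/kernel/tracing/trace_clock    # current clock shown in [ ]
 *   echo mono > /sys/kernel/tracing/trace_clock
 *
 * Switching clocks resets the buffers, since timestamps taken with
 * different clocks cannot be compared.
 */
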
6732static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6733 size_t cnt, loff_t *fpos)
6734{
6735 struct seq_file *m = filp->private_data;
6736 struct trace_array *tr = m->private;
6737 char buf[64];
6738 const char *clockstr;
6739 int ret;
6740
6741 if (cnt >= sizeof(buf))
6742 return -EINVAL;
6743
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006744 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006745 return -EFAULT;
6746
6747 buf[cnt] = 0;
6748
6749 clockstr = strstrip(buf);
6750
6751 ret = tracing_set_clock(tr, clockstr);
6752 if (ret)
6753 return ret;
6754
Zhaolei5079f322009-08-25 16:12:56 +08006755 *fpos += cnt;
6756
6757 return cnt;
6758}
6759
Li Zefan13f16d22009-12-08 11:16:11 +08006760static int tracing_clock_open(struct inode *inode, struct file *file)
6761{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006762 struct trace_array *tr = inode->i_private;
6763 int ret;
6764
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006765 ret = tracing_check_open_get_tr(tr);
6766 if (ret)
6767 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006768
6769 ret = single_open(file, tracing_clock_show, inode->i_private);
6770 if (ret < 0)
6771 trace_array_put(tr);
6772
6773 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006774}
6775
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006776static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6777{
6778 struct trace_array *tr = m->private;
6779
6780 mutex_lock(&trace_types_lock);
6781
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006782 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006783 seq_puts(m, "delta [absolute]\n");
6784 else
6785 seq_puts(m, "[delta] absolute\n");
6786
6787 mutex_unlock(&trace_types_lock);
6788
6789 return 0;
6790}
6791
6792static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6793{
6794 struct trace_array *tr = inode->i_private;
6795 int ret;
6796
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006797 ret = tracing_check_open_get_tr(tr);
6798 if (ret)
6799 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006800
6801 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6802 if (ret < 0)
6803 trace_array_put(tr);
6804
6805 return ret;
6806}
6807
Tom Zanussi00b41452018-01-15 20:51:39 -06006808int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6809{
6810 int ret = 0;
6811
6812 mutex_lock(&trace_types_lock);
6813
6814 if (abs && tr->time_stamp_abs_ref++)
6815 goto out;
6816
6817 if (!abs) {
6818 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6819 ret = -EINVAL;
6820 goto out;
6821 }
6822
6823 if (--tr->time_stamp_abs_ref)
6824 goto out;
6825 }
6826
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006827 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
Tom Zanussi00b41452018-01-15 20:51:39 -06006828
6829#ifdef CONFIG_TRACER_MAX_TRACE
6830 if (tr->max_buffer.buffer)
6831 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6832#endif
6833 out:
6834 mutex_unlock(&trace_types_lock);
6835
6836 return ret;
6837}
6838
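/*
 * tracing_set_time_stamp_abs() is reference counted so that several
 * in-kernel users (histogram triggers, for instance) can request
 * absolute timestamps independently; the buffer only switches back
 * to delta timestamps when the last user drops its reference.
 */
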
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006839struct ftrace_buffer_info {
6840 struct trace_iterator iter;
6841 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006842 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006843 unsigned int read;
6844};
6845
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006846#ifdef CONFIG_TRACER_SNAPSHOT
6847static int tracing_snapshot_open(struct inode *inode, struct file *file)
6848{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006849 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006850 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006851 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006852 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006853
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006854 ret = tracing_check_open_get_tr(tr);
6855 if (ret)
6856 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006857
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006858 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006859 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006860 if (IS_ERR(iter))
6861 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006862 } else {
6863 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006864 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006865 m = kzalloc(sizeof(*m), GFP_KERNEL);
6866 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006867 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006868 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6869 if (!iter) {
6870 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006871 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006872 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006873 ret = 0;
6874
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006875 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006876 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006877 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006878 m->private = iter;
6879 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006880 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006881out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006882 if (ret < 0)
6883 trace_array_put(tr);
6884
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006885 return ret;
6886}
6887
6888static ssize_t
6889tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6890 loff_t *ppos)
6891{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006892 struct seq_file *m = filp->private_data;
6893 struct trace_iterator *iter = m->private;
6894 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006895 unsigned long val;
6896 int ret;
6897
6898 ret = tracing_update_buffers();
6899 if (ret < 0)
6900 return ret;
6901
6902 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6903 if (ret)
6904 return ret;
6905
6906 mutex_lock(&trace_types_lock);
6907
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006908 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006909 ret = -EBUSY;
6910 goto out;
6911 }
6912
Tom Zanussia35873a2019-02-13 17:42:45 -06006913 arch_spin_lock(&tr->max_lock);
6914 if (tr->cond_snapshot)
6915 ret = -EBUSY;
6916 arch_spin_unlock(&tr->max_lock);
6917 if (ret)
6918 goto out;
6919
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006920 switch (val) {
6921 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006922 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6923 ret = -EINVAL;
6924 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006925 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006926 if (tr->allocated_snapshot)
6927 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006928 break;
6929 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006930/* Only allow per-cpu swap if the ring buffer supports it */
6931#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6932 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6933 ret = -EINVAL;
6934 break;
6935 }
6936#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006937 if (tr->allocated_snapshot)
6938 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006939 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006940 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006941 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006942 if (ret < 0)
6943 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006944 local_irq_disable();
6945 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006946 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06006947 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006948 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006949 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006950 local_irq_enable();
6951 break;
6952 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006953 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006954 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6955 tracing_reset_online_cpus(&tr->max_buffer);
6956 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04006957 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006958 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006959 break;
6960 }
6961
6962 if (ret >= 0) {
6963 *ppos += cnt;
6964 ret = cnt;
6965 }
6966out:
6967 mutex_unlock(&trace_types_lock);
6968 return ret;
6969}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006970
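/*
 * To summarize the write handler above, the snapshot file accepts
 * (path assumes the usual tracefs mount point):
 *
 *   echo 0 > /sys/kernel/tracing/snapshot   # free the snapshot buffer
 *   echo 1 > /sys/kernel/tracing/snapshot   # allocate if needed and snapshot
 *   echo 2 > /sys/kernel/tracing/snapshot   # any other value: clear it
 */
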
6971static int tracing_snapshot_release(struct inode *inode, struct file *file)
6972{
6973 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006974 int ret;
6975
6976 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006977
6978 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006979 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006980
6981 /* If write only, the seq_file is just a stub */
6982 if (m)
6983 kfree(m->private);
6984 kfree(m);
6985
6986 return 0;
6987}
6988
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006989static int tracing_buffers_open(struct inode *inode, struct file *filp);
6990static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6991 size_t count, loff_t *ppos);
6992static int tracing_buffers_release(struct inode *inode, struct file *file);
6993static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6994 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6995
6996static int snapshot_raw_open(struct inode *inode, struct file *filp)
6997{
6998 struct ftrace_buffer_info *info;
6999 int ret;
7000
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007001 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007002 ret = tracing_buffers_open(inode, filp);
7003 if (ret < 0)
7004 return ret;
7005
7006 info = filp->private_data;
7007
7008 if (info->iter.trace->use_max_tr) {
7009 tracing_buffers_release(inode, filp);
7010 return -EBUSY;
7011 }
7012
7013 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007014 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007015
7016 return ret;
7017}
7018
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007019#endif /* CONFIG_TRACER_SNAPSHOT */
7020
7021
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007022static const struct file_operations tracing_thresh_fops = {
7023 .open = tracing_open_generic,
7024 .read = tracing_thresh_read,
7025 .write = tracing_thresh_write,
7026 .llseek = generic_file_llseek,
7027};
7028
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007029#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007030static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007031 .open = tracing_open_generic,
7032 .read = tracing_max_lat_read,
7033 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007034 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007035};
Chen Gange428abb2015-11-10 05:15:15 +08007036#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007037
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007038static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007039 .open = tracing_open_generic,
7040 .read = tracing_set_trace_read,
7041 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007042 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007043};
7044
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007045static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007046 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007047 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007048 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007049 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007050 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007051 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007052};
7053
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007054static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007055 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007056 .read = tracing_entries_read,
7057 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007058 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007059 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007060};
7061
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007062static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007063 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007064 .read = tracing_total_entries_read,
7065 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007066 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007067};
7068
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007069static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007070 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007071 .write = tracing_free_buffer_write,
7072 .release = tracing_free_buffer_release,
7073};
7074
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007075static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007076 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007077 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007078 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007079 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007080};
7081
Steven Rostedtfa32e852016-07-06 15:25:08 -04007082static const struct file_operations tracing_mark_raw_fops = {
7083 .open = tracing_open_generic_tr,
7084 .write = tracing_mark_raw_write,
7085 .llseek = generic_file_llseek,
7086 .release = tracing_release_generic_tr,
7087};
7088
Zhaolei5079f322009-08-25 16:12:56 +08007089static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007090 .open = tracing_clock_open,
7091 .read = seq_read,
7092 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007093 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007094 .write = tracing_clock_write,
7095};
7096
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007097static const struct file_operations trace_time_stamp_mode_fops = {
7098 .open = tracing_time_stamp_mode_open,
7099 .read = seq_read,
7100 .llseek = seq_lseek,
7101 .release = tracing_single_release_tr,
7102};
7103
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007104#ifdef CONFIG_TRACER_SNAPSHOT
7105static const struct file_operations snapshot_fops = {
7106 .open = tracing_snapshot_open,
7107 .read = seq_read,
7108 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007109 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007110 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007111};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007112
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007113static const struct file_operations snapshot_raw_fops = {
7114 .open = snapshot_raw_open,
7115 .read = tracing_buffers_read,
7116 .release = tracing_buffers_release,
7117 .splice_read = tracing_buffers_splice_read,
7118 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007119};
7120
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007121#endif /* CONFIG_TRACER_SNAPSHOT */
7122
Tom Zanussi8a062902019-03-31 18:48:15 -05007123#define TRACING_LOG_ERRS_MAX 8
7124#define TRACING_LOG_LOC_MAX 128
7125
7126#define CMD_PREFIX " Command: "
7127
7128struct err_info {
7129 const char **errs; /* ptr to loc-specific array of err strings */
7130 u8 type; /* index into errs -> specific err string */
7131	u8			pos;	/* caret position in cmd; fits in u8 since MAX_FILTER_STR_VAL = 256 */
7132 u64 ts;
7133};
7134
7135struct tracing_log_err {
7136 struct list_head list;
7137 struct err_info info;
7138 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7139 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7140};
7141
Tom Zanussi8a062902019-03-31 18:48:15 -05007142static DEFINE_MUTEX(tracing_err_log_lock);
7143
YueHaibingff585c52019-06-14 23:32:10 +08007144static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007145{
7146 struct tracing_log_err *err;
7147
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007148 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007149 err = kzalloc(sizeof(*err), GFP_KERNEL);
7150 if (!err)
7151 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007152 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007153
7154 return err;
7155 }
7156
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007157 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007158 list_del(&err->list);
7159
7160 return err;
7161}
7162
7163/**
7164 * err_pos - find the position of a string within a command for error careting
7165 * @cmd: The tracing command that caused the error
7166 * @str: The string to position the caret at within @cmd
7167 *
7168 * Finds the position of the first occurrence of @str within @cmd. The
7169 * return value can be passed to tracing_log_err() for caret placement
7170 * within @cmd.
7171 *
7172 * Returns the index within @cmd of the first occurrence of @str or 0
7173 * if @str was not found.
7174 */
7175unsigned int err_pos(char *cmd, const char *str)
7176{
7177 char *found;
7178
7179 if (WARN_ON(!strlen(cmd)))
7180 return 0;
7181
7182 found = strstr(cmd, str);
7183 if (found)
7184 return found - cmd;
7185
7186 return 0;
7187}
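
/*
 * Illustrative sketch (not part of the kernel source): a hypothetical
 * trigger parser could pair err_pos() with tracing_log_err() to place
 * the caret on the offending token.  The names bad_token, my_errs and
 * MY_ERR_BAD_TOKEN are assumptions for this example only:
 *
 *	if (bad_token)
 *		tracing_log_err(tr, "my_trigger", cmd, my_errs,
 *				MY_ERR_BAD_TOKEN, err_pos(cmd, bad_token));
 */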
7188
7189/**
7190 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007191 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007192 * @loc: A string describing where the error occurred
7193 * @cmd: The tracing command that caused the error
7194 * @errs: The array of loc-specific static error strings
7195 * @type: The index into errs[], which produces the specific static err string
7196 * @pos: The position the caret should be placed in the cmd
7197 *
7198 * Writes an error into tracing/error_log of the form:
7199 *
7200 * <loc>: error: <text>
7201 * Command: <cmd>
7202 * ^
7203 *
7204 * tracing/error_log is a small log file containing the last
7205 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7206 * unless there has been a tracing error, and the error log can be
7207 * cleared and have its memory freed by writing the empty string in
7208 * truncation mode to it, i.e. echo > tracing/error_log.
7209 *
7210 * NOTE: the @errs array along with the @type param are used to
7211 * produce a static error string - this string is not copied and saved
7212 * when the error is logged - only a pointer to it is saved. See
7213 * existing callers for examples of how static strings are typically
7214 * defined for use with tracing_log_err().
7215 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007216void tracing_log_err(struct trace_array *tr,
7217 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007218 const char **errs, u8 type, u8 pos)
7219{
7220 struct tracing_log_err *err;
7221
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007222 if (!tr)
7223 tr = &global_trace;
7224
Tom Zanussi8a062902019-03-31 18:48:15 -05007225 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007226 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007227 if (PTR_ERR(err) == -ENOMEM) {
7228 mutex_unlock(&tracing_err_log_lock);
7229 return;
7230 }
7231
7232 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7233	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7234
7235 err->info.errs = errs;
7236 err->info.type = type;
7237 err->info.pos = pos;
7238 err->info.ts = local_clock();
7239
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007240 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007241 mutex_unlock(&tracing_err_log_lock);
7242}
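
/*
 * A minimal sketch of the resulting user-visible flow, assuming
 * tracefs is mounted at /sys/kernel/tracing; the trigger, location
 * and error text below are illustrative, not exact output:
 *
 *	# echo 'hist:keys=bad' > events/sched/sched_wakeup/trigger
 *	# cat error_log
 *	[  123.456789] hist:sched:sched_wakeup: error: Couldn't find field
 *	  Command: hist:keys=bad
 *	                     ^
 *	# echo > error_log	# clears the log and frees its memory
 */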
7243
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007244static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007245{
7246 struct tracing_log_err *err, *next;
7247
7248 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007249 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007250 list_del(&err->list);
7251 kfree(err);
7252 }
7253
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007254 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007255 mutex_unlock(&tracing_err_log_lock);
7256}
7257
7258static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7259{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007260 struct trace_array *tr = m->private;
7261
Tom Zanussi8a062902019-03-31 18:48:15 -05007262 mutex_lock(&tracing_err_log_lock);
7263
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007264 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007265}
7266
7267static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7268{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007269 struct trace_array *tr = m->private;
7270
7271 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007272}
7273
7274static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7275{
7276 mutex_unlock(&tracing_err_log_lock);
7277}
7278
7279static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7280{
7281 u8 i;
7282
7283 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7284 seq_putc(m, ' ');
7285 for (i = 0; i < pos; i++)
7286 seq_putc(m, ' ');
7287 seq_puts(m, "^\n");
7288}
7289
7290static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7291{
7292 struct tracing_log_err *err = v;
7293
7294 if (err) {
7295 const char *err_text = err->info.errs[err->info.type];
7296 u64 sec = err->info.ts;
7297 u32 nsec;
7298
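		/* info.ts is in nanoseconds: do_div() splits off whole
		 * seconds, and the remainder is printed as microseconds.
		 */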
7299 nsec = do_div(sec, NSEC_PER_SEC);
7300 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7301 err->loc, err_text);
7302 seq_printf(m, "%s", err->cmd);
7303 tracing_err_log_show_pos(m, err->info.pos);
7304 }
7305
7306 return 0;
7307}
7308
7309static const struct seq_operations tracing_err_log_seq_ops = {
7310 .start = tracing_err_log_seq_start,
7311 .next = tracing_err_log_seq_next,
7312 .stop = tracing_err_log_seq_stop,
7313 .show = tracing_err_log_seq_show
7314};
7315
7316static int tracing_err_log_open(struct inode *inode, struct file *file)
7317{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007318 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007319 int ret = 0;
7320
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007321 ret = tracing_check_open_get_tr(tr);
7322 if (ret)
7323 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007324
Tom Zanussi8a062902019-03-31 18:48:15 -05007325 /* If this file was opened for write, then erase contents */
7326 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007327 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007328
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007329 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007330 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007331 if (!ret) {
7332 struct seq_file *m = file->private_data;
7333 m->private = tr;
7334 } else {
7335 trace_array_put(tr);
7336 }
7337 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007338 return ret;
7339}
7340
7341static ssize_t tracing_err_log_write(struct file *file,
7342 const char __user *buffer,
7343 size_t count, loff_t *ppos)
7344{
7345 return count;
7346}
7347
Takeshi Misawad122ed62019-06-28 19:56:40 +09007348static int tracing_err_log_release(struct inode *inode, struct file *file)
7349{
7350 struct trace_array *tr = inode->i_private;
7351
7352 trace_array_put(tr);
7353
7354 if (file->f_mode & FMODE_READ)
7355 seq_release(inode, file);
7356
7357 return 0;
7358}
7359
Tom Zanussi8a062902019-03-31 18:48:15 -05007360static const struct file_operations tracing_err_log_fops = {
7361 .open = tracing_err_log_open,
7362 .write = tracing_err_log_write,
7363 .read = seq_read,
7364 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007365 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007366};
7367
Steven Rostedt2cadf912008-12-01 22:20:19 -05007368static int tracing_buffers_open(struct inode *inode, struct file *filp)
7369{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007370 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007371 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007372 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007373
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007374 ret = tracing_check_open_get_tr(tr);
7375 if (ret)
7376 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007377
Steven Rostedt2cadf912008-12-01 22:20:19 -05007378 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007379 if (!info) {
7380 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007381 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007382 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007383
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007384 mutex_lock(&trace_types_lock);
7385
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007386 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007387 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007388 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007389 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007390 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007391 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007392 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007393
7394 filp->private_data = info;
7395
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007396 tr->current_trace->ref++;
7397
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007398 mutex_unlock(&trace_types_lock);
7399
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007400 ret = nonseekable_open(inode, filp);
7401 if (ret < 0)
7402 trace_array_put(tr);
7403
7404 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007405}
7406
Al Viro9dd95742017-07-03 00:42:43 -04007407static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007408tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7409{
7410 struct ftrace_buffer_info *info = filp->private_data;
7411 struct trace_iterator *iter = &info->iter;
7412
7413 return trace_poll(iter, filp, poll_table);
7414}
7415
Steven Rostedt2cadf912008-12-01 22:20:19 -05007416static ssize_t
7417tracing_buffers_read(struct file *filp, char __user *ubuf,
7418 size_t count, loff_t *ppos)
7419{
7420 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007421 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007422 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007423 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007424
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007425 if (!count)
7426 return 0;
7427
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007428#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007429 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7430 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007431#endif
7432
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007433 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007434 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007435 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007436 if (IS_ERR(info->spare)) {
7437 ret = PTR_ERR(info->spare);
7438 info->spare = NULL;
7439 } else {
7440 info->spare_cpu = iter->cpu_file;
7441 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007442 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007443 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007444 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007445
Steven Rostedt2cadf912008-12-01 22:20:19 -05007446 /* Do we have previous read data to read? */
7447 if (info->read < PAGE_SIZE)
7448 goto read;
7449
Steven Rostedtb6273442013-02-28 13:44:11 -05007450 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007451 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007452 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007453 &info->spare,
7454 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007455 iter->cpu_file, 0);
7456 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007457
7458 if (ret < 0) {
7459 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007460 if ((filp->f_flags & O_NONBLOCK))
7461 return -EAGAIN;
7462
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007463 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007464 if (ret)
7465 return ret;
7466
Steven Rostedtb6273442013-02-28 13:44:11 -05007467 goto again;
7468 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007469 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007470 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007471
Steven Rostedt436fc282011-10-14 10:44:25 -04007472 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007473 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007474 size = PAGE_SIZE - info->read;
7475 if (size > count)
7476 size = count;
7477
7478 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007479 if (ret == size)
7480 return -EFAULT;
7481
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007482 size -= ret;
7483
Steven Rostedt2cadf912008-12-01 22:20:19 -05007484 *ppos += size;
7485 info->read += size;
7486
7487 return size;
7488}
7489
7490static int tracing_buffers_release(struct inode *inode, struct file *file)
7491{
7492 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007493 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007494
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007495 mutex_lock(&trace_types_lock);
7496
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007497 iter->tr->current_trace->ref--;
7498
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007499 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007500
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007501 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007502 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007503 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007504 kfree(info);
7505
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007506 mutex_unlock(&trace_types_lock);
7507
Steven Rostedt2cadf912008-12-01 22:20:19 -05007508 return 0;
7509}
7510
7511struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007512 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007513 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007514 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007515 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007516};
7517
Jann Hornb9872222019-04-04 23:59:25 +02007518static void buffer_ref_release(struct buffer_ref *ref)
7519{
7520 if (!refcount_dec_and_test(&ref->refcount))
7521 return;
7522 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7523 kfree(ref);
7524}
7525
Steven Rostedt2cadf912008-12-01 22:20:19 -05007526static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7527 struct pipe_buffer *buf)
7528{
7529 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7530
Jann Hornb9872222019-04-04 23:59:25 +02007531 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007532 buf->private = 0;
7533}
7534
Matthew Wilcox15fab632019-04-05 14:02:10 -07007535static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007536 struct pipe_buffer *buf)
7537{
7538 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7539
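	/* Refuse further gets once the count is implausibly large, to
	 * guard against refcount overflow driven from user space.
	 */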
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007540 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007541 return false;
7542
Jann Hornb9872222019-04-04 23:59:25 +02007543 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007544 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007545}
7546
7547/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007548static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007549 .confirm = generic_pipe_buf_confirm,
7550 .release = buffer_pipe_buf_release,
Jann Hornb9872222019-04-04 23:59:25 +02007551 .steal = generic_pipe_buf_nosteal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007552 .get = buffer_pipe_buf_get,
7553};
7554
7555/*
7556 * Callback from splice_to_pipe(); used to release some pages
7557 * at the end of the spd in case we errored out while filling the pipe.
7558 */
7559static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7560{
7561 struct buffer_ref *ref =
7562 (struct buffer_ref *)spd->partial[i].private;
7563
Jann Hornb9872222019-04-04 23:59:25 +02007564 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007565 spd->partial[i].private = 0;
7566}
7567
7568static ssize_t
7569tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7570 struct pipe_inode_info *pipe, size_t len,
7571 unsigned int flags)
7572{
7573 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007574 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007575 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7576 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007577 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007578 .pages = pages_def,
7579 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007580 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007581 .ops = &buffer_pipe_buf_ops,
7582 .spd_release = buffer_spd_release,
7583 };
7584 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05007585 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01007586 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007587
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007588#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007589 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7590 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007591#endif
7592
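	/* The splice path hands out whole pages: the file offset must be
	 * page-aligned, and the length is rounded down to whole pages
	 * (a request shorter than one page is rejected outright).
	 */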
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007593 if (*ppos & (PAGE_SIZE - 1))
7594 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007595
7596 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007597 if (len < PAGE_SIZE)
7598 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007599 len &= PAGE_MASK;
7600 }
7601
Al Viro1ae22932016-09-17 18:31:46 -04007602 if (splice_grow_spd(pipe, &spd))
7603 return -ENOMEM;
7604
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007605 again:
7606 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007607 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04007608
Al Viroa786c062014-04-11 12:01:03 -04007609 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007610 struct page *page;
7611 int r;
7612
7613 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01007614 if (!ref) {
7615 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007616 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01007617 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007618
Jann Hornb9872222019-04-04 23:59:25 +02007619 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007620 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007621 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007622 if (IS_ERR(ref->page)) {
7623 ret = PTR_ERR(ref->page);
7624 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007625 kfree(ref);
7626 break;
7627 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007628 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007629
7630 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007631 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007632 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007633 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7634 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007635 kfree(ref);
7636 break;
7637 }
7638
Steven Rostedt2cadf912008-12-01 22:20:19 -05007639 page = virt_to_page(ref->page);
7640
7641 spd.pages[i] = page;
7642 spd.partial[i].len = PAGE_SIZE;
7643 spd.partial[i].offset = 0;
7644 spd.partial[i].private = (unsigned long)ref;
7645 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08007646 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04007647
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007648 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007649 }
7650
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007651 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007652 spd.nr_pages = i;
7653
7654 /* did we read anything? */
7655 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01007656 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007657 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01007658
Al Viro1ae22932016-09-17 18:31:46 -04007659 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007660 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04007661 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007662
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007663 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04007664 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04007665 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01007666
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007667 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007668 }
7669
7670 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04007671out:
Eric Dumazet047fe362012-06-12 15:24:40 +02007672 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007673
Steven Rostedt2cadf912008-12-01 22:20:19 -05007674 return ret;
7675}
7676
7677static const struct file_operations tracing_buffers_fops = {
7678 .open = tracing_buffers_open,
7679 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007680 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007681 .release = tracing_buffers_release,
7682 .splice_read = tracing_buffers_splice_read,
7683 .llseek = no_llseek,
7684};
7685
Steven Rostedtc8d77182009-04-29 18:03:45 -04007686static ssize_t
7687tracing_stats_read(struct file *filp, char __user *ubuf,
7688 size_t count, loff_t *ppos)
7689{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007690 struct inode *inode = file_inode(filp);
7691 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007692 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007693 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007694 struct trace_seq *s;
7695 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007696 unsigned long long t;
7697 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007698
Li Zefane4f2d102009-06-15 10:57:28 +08007699 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007700 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01007701 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04007702
7703 trace_seq_init(s);
7704
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007705 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007706 trace_seq_printf(s, "entries: %ld\n", cnt);
7707
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007708 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007709 trace_seq_printf(s, "overrun: %ld\n", cnt);
7710
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007711 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007712 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7713
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007714 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007715 trace_seq_printf(s, "bytes: %ld\n", cnt);
7716
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007717 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007718 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007719 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007720 usec_rem = do_div(t, USEC_PER_SEC);
7721 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7722 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007723
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007724 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007725 usec_rem = do_div(t, USEC_PER_SEC);
7726 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7727 } else {
7728 /* counter or tsc mode for trace_clock */
7729 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007730 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007731
7732 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007733 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007734 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007735
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007736 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007737 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7738
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007739 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007740 trace_seq_printf(s, "read events: %ld\n", cnt);
7741
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007742 count = simple_read_from_buffer(ubuf, count, ppos,
7743 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007744
7745 kfree(s);
7746
7747 return count;
7748}
7749
7750static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007751 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007752 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007753 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007754 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007755};
7756
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007757#ifdef CONFIG_DYNAMIC_FTRACE
7758
7759static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007760tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007761 size_t cnt, loff_t *ppos)
7762{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007763 ssize_t ret;
7764 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007765 int r;
7766
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007767 /* 256 should be plenty to hold the amount needed */
7768 buf = kmalloc(256, GFP_KERNEL);
7769 if (!buf)
7770 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007771
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04007772 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7773 ftrace_update_tot_cnt,
7774 ftrace_number_of_pages,
7775 ftrace_number_of_groups);
7776
7777 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7778 kfree(buf);
7779 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007780}
7781
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007782static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007783 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007784 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007785 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007786};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007787#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007788
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007789#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7790static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007791ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007792 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007793 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007794{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007795 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007796}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007797
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007798static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007799ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007800 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007801 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007802{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007803 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007804 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007805
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007806 if (mapper)
7807 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007808
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007809 if (count) {
7810
7811 if (*count <= 0)
7812 return;
7813
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007814 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007815 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007816
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007817 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007818}
7819
7820static int
7821ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7822 struct ftrace_probe_ops *ops, void *data)
7823{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007824 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007825 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007826
7827 seq_printf(m, "%ps:", (void *)ip);
7828
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007829 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007830
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007831 if (mapper)
7832 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7833
7834 if (count)
7835 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007836 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007837 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007838
7839 return 0;
7840}
7841
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007842static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007843ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007844 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007845{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007846 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007847
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007848 if (!mapper) {
7849 mapper = allocate_ftrace_func_mapper();
7850 if (!mapper)
7851 return -ENOMEM;
7852 *data = mapper;
7853 }
7854
7855 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007856}
7857
7858static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007859ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007860 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007861{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007862 struct ftrace_func_mapper *mapper = data;
7863
7864 if (!ip) {
7865 if (!mapper)
7866 return;
7867 free_ftrace_func_mapper(mapper, NULL);
7868 return;
7869 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007870
7871 ftrace_func_mapper_remove_ip(mapper, ip);
7872}
7873
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007874static struct ftrace_probe_ops snapshot_probe_ops = {
7875 .func = ftrace_snapshot,
7876 .print = ftrace_snapshot_print,
7877};
7878
7879static struct ftrace_probe_ops snapshot_count_probe_ops = {
7880 .func = ftrace_count_snapshot,
7881 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007882 .init = ftrace_snapshot_init,
7883 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007884};
7885
7886static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007887ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007888 char *glob, char *cmd, char *param, int enable)
7889{
7890 struct ftrace_probe_ops *ops;
7891 void *count = (void *)-1;
7892 char *number;
7893 int ret;
7894
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007895 if (!tr)
7896 return -ENODEV;
7897
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007898 /* hash funcs only work with set_ftrace_filter */
7899 if (!enable)
7900 return -EINVAL;
7901
7902 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7903
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007904 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007905 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007906
7907 if (!param)
7908 goto out_reg;
7909
7910 number = strsep(&param, ":");
7911
7912 if (!strlen(number))
7913 goto out_reg;
7914
7915 /*
7916 * We use the callback data field (which is a pointer)
7917 * as our counter.
7918 */
7919 ret = kstrtoul(number, 0, (unsigned long *)&count);
7920 if (ret)
7921 return ret;
7922
7923 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007924 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007925 if (ret < 0)
7926 goto out;
7927
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007928 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007929
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007930 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007931 return ret < 0 ? ret : 0;
7932}
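
/*
 * Usage sketch for the resulting "snapshot" function command (the
 * function name is a placeholder; any traceable function works):
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The first form snapshots on every hit of the function, the second
 * only for the first five hits (the count is kept as the callback
 * data, see above), and the '!' form unregisters the probe again.
 */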
7933
7934static struct ftrace_func_command ftrace_snapshot_cmd = {
7935 .name = "snapshot",
7936 .func = ftrace_trace_snapshot_callback,
7937};
7938
Tom Zanussi38de93a2013-10-24 08:34:18 -05007939static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007940{
7941 return register_ftrace_command(&ftrace_snapshot_cmd);
7942}
7943#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007944static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007945#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007946
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007947static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007948{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007949 if (WARN_ON(!tr->dir))
7950 return ERR_PTR(-ENODEV);
7951
7952 /* Top directory uses NULL as the parent */
7953 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7954 return NULL;
7955
7956 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007957 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007958}
7959
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007960static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7961{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007962 struct dentry *d_tracer;
7963
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007964 if (tr->percpu_dir)
7965 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007966
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007967 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007968 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007969 return NULL;
7970
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007971 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007972
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05007973 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007974 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007975
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007976 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007977}
7978
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007979static struct dentry *
7980trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7981 void *data, long cpu, const struct file_operations *fops)
7982{
7983 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7984
7985 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007986 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007987 return ret;
7988}
7989
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007990static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007991tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007992{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007993 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007994 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007995 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007996
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007997 if (!d_percpu)
7998 return;
7999
Steven Rostedtdd49a382010-10-20 21:51:26 -04008000 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008001 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008002 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008003 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008004 return;
8005 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008006
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008007 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008008 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008009 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008010
8011 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008012 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008013 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008014
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008015 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008016 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008017
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008018 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008019 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008020
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008021 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008022 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008023
8024#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008025 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008026 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008027
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008028 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008029 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008030#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008031}
8032
Steven Rostedt60a11772008-05-12 21:20:44 +02008033#ifdef CONFIG_FTRACE_SELFTEST
8034/* Let selftest have access to static functions in this file */
8035#include "trace_selftest.c"
8036#endif
8037
Steven Rostedt577b7852009-02-26 23:43:05 -05008038static ssize_t
8039trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8040 loff_t *ppos)
8041{
8042 struct trace_option_dentry *topt = filp->private_data;
8043 char *buf;
8044
8045 if (topt->flags->val & topt->opt->bit)
8046 buf = "1\n";
8047 else
8048 buf = "0\n";
8049
8050 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8051}
8052
8053static ssize_t
8054trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8055 loff_t *ppos)
8056{
8057 struct trace_option_dentry *topt = filp->private_data;
8058 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008059 int ret;
8060
Peter Huewe22fe9b52011-06-07 21:58:27 +02008061 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8062 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008063 return ret;
8064
Li Zefan8d18eaa2009-12-08 11:17:06 +08008065 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008066 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008067
8068 if (!!(topt->flags->val & topt->opt->bit) != val) {
8069 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008070 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008071 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008072 mutex_unlock(&trace_types_lock);
8073 if (ret)
8074 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008075 }
8076
8077 *ppos += cnt;
8078
8079 return cnt;
8080}
8081
8082
8083static const struct file_operations trace_options_fops = {
8084 .open = tracing_open_generic,
8085 .read = trace_options_read,
8086 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008087 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008088};
8089
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008090/*
8091 * In order to pass in both the trace_array descriptor as well as the index
8092 * to the flag that the trace option file represents, the trace_array
8093 * has a character array of trace_flags_index[], which holds the index
8094 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8095 * The address of this character array is passed to the flag option file
8096 * read/write callbacks.
8097 *
8098 * In order to extract both the index and the trace_array descriptor,
8099 * get_tr_index() uses the following algorithm.
8100 *
8101 * idx = *ptr;
8102 *
8103 * The pointer itself contains the address of the index (remember,
8104 * index[1] == 1), so dereferencing it yields the index.
8105 *
8106 * Then, to get the trace_array descriptor, subtract that index
8107 * from the pointer to get back to the start of the index array:
8108 *
8109 * ptr - idx == &index[0]
8110 *
8111 * Then a simple container_of() from that pointer gets us to the
8112 * trace_array descriptor.
8113 */
8114static void get_tr_index(void *data, struct trace_array **ptr,
8115 unsigned int *pindex)
8116{
8117 *pindex = *(unsigned char *)data;
8118
8119 *ptr = container_of(data - *pindex, struct trace_array,
8120 trace_flags_index);
8121}
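
/*
 * Worked illustration of the scheme above (simplified; the array
 * size N is arbitrary here):
 *
 *	struct trace_array {
 *		...
 *		unsigned char trace_flags_index[N];
 *	};
 *
 *	data = &tr->trace_flags_index[3];
 *	idx  = *(unsigned char *)data;		   idx == 3
 *	data - idx		== &tr->trace_flags_index[0]
 *	container_of(data - idx, struct trace_array, trace_flags_index)
 *				== tr
 */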
8122
Steven Rostedta8259072009-02-26 22:19:12 -05008123static ssize_t
8124trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8125 loff_t *ppos)
8126{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008127 void *tr_index = filp->private_data;
8128 struct trace_array *tr;
8129 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008130 char *buf;
8131
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008132 get_tr_index(tr_index, &tr, &index);
8133
8134 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008135 buf = "1\n";
8136 else
8137 buf = "0\n";
8138
8139 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8140}
8141
8142static ssize_t
8143trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8144 loff_t *ppos)
8145{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008146 void *tr_index = filp->private_data;
8147 struct trace_array *tr;
8148 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008149 unsigned long val;
8150 int ret;
8151
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008152 get_tr_index(tr_index, &tr, &index);
8153
Peter Huewe22fe9b52011-06-07 21:58:27 +02008154 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8155 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008156 return ret;
8157
Zhaoleif2d84b62009-08-07 18:55:48 +08008158 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008159 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008160
Prateek Sood3a53acf2019-12-10 09:15:16 +00008161 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008162 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008163 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008164 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008165 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008166
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008167 if (ret < 0)
8168 return ret;
8169
Steven Rostedta8259072009-02-26 22:19:12 -05008170 *ppos += cnt;
8171
8172 return cnt;
8173}
8174
Steven Rostedta8259072009-02-26 22:19:12 -05008175static const struct file_operations trace_options_core_fops = {
8176 .open = tracing_open_generic,
8177 .read = trace_options_core_read,
8178 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008179 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008180};
8181
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008182struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008183 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008184 struct dentry *parent,
8185 void *data,
8186 const struct file_operations *fops)
8187{
8188 struct dentry *ret;
8189
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008190 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008191 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008192 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008193
8194 return ret;
8195}
8196
8197
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008198static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008199{
8200 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008201
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008202 if (tr->options)
8203 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008204
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008205 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008206 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008207 return NULL;
8208
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008209 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008210 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008211 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008212 return NULL;
8213 }
8214
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008215 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008216}
8217
Steven Rostedt577b7852009-02-26 23:43:05 -05008218static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008219create_trace_option_file(struct trace_array *tr,
8220 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008221 struct tracer_flags *flags,
8222 struct tracer_opt *opt)
8223{
8224 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008225
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008226 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008227 if (!t_options)
8228 return;
8229
8230 topt->flags = flags;
8231 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008232 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008233
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008234 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008235 &trace_options_fops);
8236
Steven Rostedt577b7852009-02-26 23:43:05 -05008237}
8238
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008239static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008240create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008241{
8242 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008243 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008244 struct tracer_flags *flags;
8245 struct tracer_opt *opts;
8246 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008247 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008248
8249 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008250 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008251
8252 flags = tracer->flags;
8253
8254 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008255 return;
8256
8257 /*
8258 * If this is an instance, only create flags for tracers
8259 * the instance may have.
8260 */
8261 if (!trace_ok_for_array(tracer, tr))
8262 return;
8263
8264 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008265		/* Make sure there are no duplicate flags. */
8266 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008267 return;
8268 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008269
8270 opts = flags->opts;
8271
8272 for (cnt = 0; opts[cnt].name; cnt++)
8273 ;
8274
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008275 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008276 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008277 return;
8278
8279 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8280 GFP_KERNEL);
8281 if (!tr_topts) {
8282 kfree(topts);
8283 return;
8284 }
8285
8286 tr->topts = tr_topts;
8287 tr->topts[tr->nr_topts].tracer = tracer;
8288 tr->topts[tr->nr_topts].topts = topts;
8289 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008290
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008291 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008292 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008293 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008294 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008295 "Failed to create trace option: %s",
8296 opts[cnt].name);
8297 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008298}
8299
Steven Rostedta8259072009-02-26 22:19:12 -05008300static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008301create_trace_option_core_file(struct trace_array *tr,
8302 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008303{
8304 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008305
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008306 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008307 if (!t_options)
8308 return NULL;
8309
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008310 return trace_create_file(option, 0644, t_options,
8311 (void *)&tr->trace_flags_index[index],
8312 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008313}
8314
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008315static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008316{
8317 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008318 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008319 int i;
8320
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008321 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008322 if (!t_options)
8323 return;
8324
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008325 for (i = 0; trace_options[i]; i++) {
8326 if (top_level ||
8327 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8328 create_trace_option_core_file(tr, trace_options[i], i);
8329 }
Steven Rostedta8259072009-02-26 22:19:12 -05008330}
8331
Steven Rostedt499e5472012-02-22 15:50:28 -05008332static ssize_t
8333rb_simple_read(struct file *filp, char __user *ubuf,
8334 size_t cnt, loff_t *ppos)
8335{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008336 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008337 char buf[64];
8338 int r;
8339
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008340 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008341 r = sprintf(buf, "%d\n", r);
8342
8343 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8344}
8345
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

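/*
 * "buffer_percent" read handler: report the trace array's buffer_percent
 * value (how full the ring buffer must be before waiters are woken).
 */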
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

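/*
 * "buffer_percent" write handler: values above 100 are rejected with
 * -EINVAL, and a write of 0 is bumped up to 1.
 */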
static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

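/*
 * Allocate the ring buffer and the per-CPU data of one array_buffer,
 * honoring the trace array's overwrite setting.
 */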
static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->array_buffer,
			   ring_buffer_size(tr->array_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
		ring_buffer_free(tr->array_buffer.buffer);
		tr->array_buffer.buffer = NULL;
		free_percpu(tr->array_buffer.data);
		tr->array_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct array_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{
	struct trace_array *tr, *found = NULL;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, instance) == 0) {
			found = tr;
			break;
		}
	}

	return found;
}

struct trace_array *trace_array_find_get(const char *instance)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	tr = trace_array_find(instance);
	if (tr)
		tr->ref++;
	mutex_unlock(&trace_types_lock);

	return tr;
}

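/*
 * Create a new trace array (instance): allocate its descriptor and
 * buffers, create its tracefs directory and files, and add it to the
 * global list. Both call sites hold trace_types_lock across this.
 */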
static struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return ERR_PTR(ret);

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

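/* tracefs callback for a mkdir in the "instances" directory */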
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		goto out_unlock;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return ret;
}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 *
 * Returns a pointer to the trace array with the given name, or NULL if
 * it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until trace_array_put()
 * is called, user space cannot delete it.
 */
struct trace_array *trace_array_get_by_name(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	tr = trace_array_create(name);

	if (IS_ERR(tr))
		tr = NULL;
out_unlock:
	if (tr)
		tr->ref++;

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);

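/*
 * Tear down one trace array: unlink it from the global list, disable
 * its flags and tracer, remove its tracefs directory, and free its
 * buffers. Fails with -EBUSY while the instance is still referenced.
 */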
static int __remove_instance(struct trace_array *tr)
{
	int i;

	/* Reference counter for a newly created trace array = 1. */
	if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);
	tr = NULL;

	return 0;
}

int trace_array_destroy(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret;

	if (!this_tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;

	/* Making sure trace array exists before destroying it. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	tr = trace_array_find(name);
	if (tr)
		ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
		return;
}

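/*
 * Populate the tracefs directory of a trace array with the standard
 * control and output files (trace, trace_pipe, tracing_on, ...).
 */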
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_maxlat_file(tr, d_tracer);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		MEM_FAIL(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

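/*
 * Automount callback: mounts tracefs on the debugfs "tracing"
 * directory the first time it is accessed.
 */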
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return ERR_PTR(-EPERM);
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

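/* Register the eval maps built into the kernel image (linker section). */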
static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

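/*
 * Runs at fs_initcall time (see the initcall registrations at the
 * bottom of this file) to create the top level tracefs files and the
 * "instances" machinery.
 */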
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

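/* On panic, dump the ring buffer if ftrace_dump_on_oops is set. */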
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

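/*
 * Set up an iterator over the global trace array, as used by
 * ftrace_dump() when writing the buffer out through printk().
 */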
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

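/*
 * Dump the ring buffer to the console. Meant for oops/panic paths:
 * tracing is turned off first and only one dumper may run at a time.
 */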
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

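/* Split a command line into an argv array and hand it to createfn. */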
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096

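/*
 * Copy a user buffer in WRITE_BUFSIZE chunks, strip '#' comments, and
 * run each newline-terminated line through trace_run_command().
 */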
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

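/*
 * Allocate the global trace buffers and the supporting state; called
 * from early_trace_init() before the tracefs files exist.
 */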
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

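/*
 * Called from early boot code: set up the tracepoint printk iterator
 * if requested and allocate the trace buffers themselves.
 */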
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section
	 * that is about to be freed. This function is called at
	 * late_initcall time; if the boot tracer was never found,
	 * clear the pointer out to prevent a later registration
	 * from accessing the freed buffer.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return -EPERM;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif