// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although some concurrent insertions
 * into the ring buffer, such as trace_printk(), could occur at the
 * same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump the buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 */
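/*
 * Example (illustrative usage sketch, not part of the original file):
 * to dump only the buffer of the oopsing CPU, boot with
 * "ftrace_dump_on_oops=orig_cpu" on the kernel command line, or at
 * run time do:
 *
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops
 */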

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};
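
/*
 * Illustration (added for clarity, matching the description above):
 * an array holding N saved maps is laid out as
 *
 *	item[0]   head { .length = N, .mod = <owning module or NULL> }
 *	item[1]   map  <first saved trace_eval_map>
 *	  ...
 *	item[N]   map  <last saved trace_eval_map>
 *	item[N+1] tail { .next = <next saved array, or NULL at the end> }
 */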

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct ring_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
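
/*
 * Worked example (added for clarity): ns2usecs(1499) yields
 * (1499 + 500) / 1000 = 1, while ns2usecs(1500) yields
 * (1500 + 500) / 1000 = 2; the +500 makes the integer division
 * round to the nearest microsecond instead of always truncating.
 */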

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
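
/*
 * Usage sketch (added for illustration): fork/exit tracepoint handlers
 * keep a pid filter up to date with calls like
 *
 *	trace_filter_add_remove_task(pid_list, self, task);	// on fork
 *	trace_filter_add_remove_task(pid_list, NULL, task);	// on exit
 *
 * so children of filtered tasks are added and exiting tasks are removed.
 */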

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
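
/*
 * Usage sketch (added for illustration; the actual wiring lives in the
 * files that expose pid filters): the three helpers above plug into a
 * seq_operations structure, roughly as
 *
 *	static const struct seq_operations example_pid_sops = {
 *		.start = <wrapper calling trace_pid_start(pid_list, pos)>,
 *		.next  = <wrapper calling trace_pid_next(pid_list, v, pos)>,
 *		.stop  = <release any locks taken in start>,
 *		.show  = trace_pid_show,
 *	};
 *
 * trace_pid_show() matches the seq_file show signature directly; start
 * and next need thin wrappers to supply the pid_list.
 */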

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
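
/*
 * Example (illustrative): this is the parser behind pid-filter files
 * such as "set_event_pid" in tracefs, so a write like
 *
 *	echo 123 456 >> /sys/kernel/debug/tracing/set_event_pid
 *
 * builds a fresh bitmap with pids 123 and 456 set (plus any pids already
 * in the old list, which are copied into the new one above).
 */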

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

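/*
 * Locking pattern (added for illustration): a reader of a single cpu
 * buffer brackets its access with
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * while passing RING_BUFFER_ALL_CPUS instead takes exclusive access
 * to every cpu buffer at once.
 */
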
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
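
/*
 * Usage note (added for illustration): this is the backend of the
 * trace_puts() macro, so a typical call looks like
 *
 *	trace_puts("reached the slow path\n");
 *
 * which logs the literal string into the trace buffer along with the
 * caller's instruction pointer.
 */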

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
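
/*
 * Sketch (illustrative only; the names are made up): a user of the
 * conditional snapshot API supplies an update callback that inspects
 * cond_data and decides whether the snapshot should be taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *state = cond_data;
 *
 *		return state->hit_count > state->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *
 * Later calls to tracing_snapshot_cond(tr, cond_data) then only swap
 * the buffers when the callback returns true.
 */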

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
1361
Tom Zanussi860f9f62018-01-15 20:51:48 -06001362bool trace_clock_in_ns(struct trace_array *tr)
1363{
1364 if (trace_clocks[tr->clock_id].in_ns)
1365 return true;
1366
1367 return false;
1368}
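/*
 * Usage sketch (an assumed caller, not taken from this file;
 * report_usecs() and report_raw() are hypothetical helpers):
 * timestamp consumers convert to usecs only when the selected clock
 * counts in nanoseconds, since e.g. the "counter" clock does not:
 *
 *	u64 ts = ring_buffer_time_stamp(tr->trace_buffer.buffer, cpu);
 *
 *	if (trace_clock_in_ns(tr))
 *		report_usecs(nsecs_to_usecs(ts));
 *	else
 *		report_raw(ts);
 */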
1369
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001370/*
1371 * trace_parser_get_init - allocates and initializes the trace parser buffer
1372 */
1373int trace_parser_get_init(struct trace_parser *parser, int size)
1374{
1375 memset(parser, 0, sizeof(*parser));
1376
1377 parser->buffer = kmalloc(size, GFP_KERNEL);
1378 if (!parser->buffer)
1379 return 1;
1380
1381 parser->size = size;
1382 return 0;
1383}
1384
1385/*
1386 * trace_parser_put - frees the buffer for trace parser
1387 */
1388void trace_parser_put(struct trace_parser *parser)
1389{
1390 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001391 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001392}
1393
1394/*
1395 * trace_get_user - reads one space-separated string from user input
1396 * (whitespace is matched by isspace(ch))
1397 *
1398 * For each string found the 'struct trace_parser' is updated,
1399 * and the function returns.
1400 *
1401 * Returns number of bytes read.
1402 *
1403 * See kernel/trace/trace.h for 'struct trace_parser' details.
1404 */
1405int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1406 size_t cnt, loff_t *ppos)
1407{
1408 char ch;
1409 size_t read = 0;
1410 ssize_t ret;
1411
1412 if (!*ppos)
1413 trace_parser_clear(parser);
1414
1415 ret = get_user(ch, ubuf++);
1416 if (ret)
1417 goto out;
1418
1419 read++;
1420 cnt--;
1421
1422 /*
1423	 * The parser did not finish on the last write,
1424	 * so continue reading the user input without skipping spaces.
1425 */
1426 if (!parser->cont) {
1427 /* skip white space */
1428 while (cnt && isspace(ch)) {
1429 ret = get_user(ch, ubuf++);
1430 if (ret)
1431 goto out;
1432 read++;
1433 cnt--;
1434 }
1435
Changbin Du76638d92018-01-16 17:02:29 +08001436 parser->idx = 0;
1437
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001438 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001439 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001440 *ppos += read;
1441 ret = read;
1442 goto out;
1443 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001444 }
1445
1446 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001447 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001448 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001449 parser->buffer[parser->idx++] = ch;
1450 else {
1451 ret = -EINVAL;
1452 goto out;
1453 }
1454 ret = get_user(ch, ubuf++);
1455 if (ret)
1456 goto out;
1457 read++;
1458 cnt--;
1459 }
1460
1461 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001462 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001463 parser->buffer[parser->idx] = 0;
1464 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001465 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001466 parser->cont = true;
1467 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001468 /* Make sure the parsed string always terminates with '\0'. */
1469 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001470 } else {
1471 ret = -EINVAL;
1472 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001473 }
1474
1475 *ppos += read;
1476 ret = read;
1477
1478out:
1479 return ret;
1480}
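/*
 * Usage sketch (modeled on callers such as the filter write handlers;
 * process_token() is a hypothetical helper). On success parser.buffer
 * holds one NUL-terminated, space-delimited token:
 *
 *	struct trace_parser parser;
 *	ssize_t ret;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return ret;
 */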
1481
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001482/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001483static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001484{
1485 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001486
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001487 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001488 return -EBUSY;
1489
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001490 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001491 if (cnt > len)
1492 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001493 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001494
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001495 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001496 return cnt;
1497}
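/*
 * Worked example (illustrative): with trace_seq_used(s) == 10 and
 * s->seq.readpos == 4, trace_seq_to_buffer(s, buf, 32) copies the six
 * unread bytes, advances readpos to 10 and returns 6; a further call
 * then returns -EBUSY since nothing unread remains.
 */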
1498
Tim Bird0e950172010-02-25 15:36:43 -08001499unsigned long __read_mostly tracing_thresh;
1500
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001501#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001502/*
1503 * Copy the new maximum trace into the separate maximum-trace
1504 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001505 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001506 */
1507static void
1508__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1509{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001510 struct trace_buffer *trace_buf = &tr->trace_buffer;
1511 struct trace_buffer *max_buf = &tr->max_buffer;
1512 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1513 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001514
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001515 max_buf->cpu = cpu;
1516 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001517
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001518 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001519 max_data->critical_start = data->critical_start;
1520 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001521
Tom Zanussi85f726a2019-03-05 10:12:00 -06001522 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001523 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001524 /*
1525 * If tsk == current, then use current_uid(), as that does not use
1526 * RCU. The irq tracer can be called out of RCU scope.
1527 */
1528 if (tsk == current)
1529 max_data->uid = current_uid();
1530 else
1531 max_data->uid = task_uid(tsk);
1532
Steven Rostedt8248ac02009-09-02 12:27:41 -04001533 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1534 max_data->policy = tsk->policy;
1535 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001536
1537	/* record this task's comm */
1538 tracing_record_cmdline(tsk);
1539}
1540
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001541/**
1542 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1543 * @tr: the trace array to snapshot
1544 * @tsk: the task with the latency
1545 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001546 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001547 *
1548 * Flip the buffers between the @tr and the max_tr and record information
1549 * about which task was the cause of this latency.
1550 */
Ingo Molnare309b412008-05-12 21:20:51 +02001551void
Tom Zanussia35873a2019-02-13 17:42:45 -06001552update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1553 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001554{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001555 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001556 return;
1557
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001558 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001559
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001560 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001561 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001562 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001563 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001564 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001565
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001566 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001567
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001568 /* Inherit the recordable setting from trace_buffer */
1569 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1570 ring_buffer_record_on(tr->max_buffer.buffer);
1571 else
1572 ring_buffer_record_off(tr->max_buffer.buffer);
1573
Tom Zanussia35873a2019-02-13 17:42:45 -06001574#ifdef CONFIG_TRACER_SNAPSHOT
1575 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1576 goto out_unlock;
1577#endif
Gustavo A. R. Silva08ae88f2018-02-09 11:53:16 -06001578 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001579
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001581
1582 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001583 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001584}
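/*
 * Illustrative call site (a sketch modeled on the latency tracers,
 * not code from this file): when a new worst case is observed the
 * tracer records it by swapping in the snapshot buffer:
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id(), NULL);
 *	}
 */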
1585
1586/**
1587 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001588 * @tr: the trace array to copy from
1589 * @tsk: task with the latency
1590 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001591 *
1592 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593 */
Ingo Molnare309b412008-05-12 21:20:51 +02001594void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1596{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001597 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001599 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001600 return;
1601
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001602 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001603 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001604 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001605 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001606 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001607 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001608
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001609 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001610
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001611 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001612
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001613 if (ret == -EBUSY) {
1614 /*
1615 * We failed to swap the buffer due to a commit taking
1616 * place on this CPU. We fail to record, but we reset
1617 * the max trace buffer (no one writes directly to it)
1618 * and flag that it failed.
1619 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001620 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001621 "Failed to swap buffers due to commit in progress\n");
1622 }
1623
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001624 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001625
1626 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001627 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001628}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001629#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001631static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001632{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001633 /* Iterators are static, they should be filled or empty */
1634 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001635 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001636
Rabin Vincente30f53a2014-11-10 19:46:34 +01001637 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1638 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001639}
1640
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001641#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001642static bool selftests_can_run;
1643
1644struct trace_selftests {
1645 struct list_head list;
1646 struct tracer *type;
1647};
1648
1649static LIST_HEAD(postponed_selftests);
1650
1651static int save_selftest(struct tracer *type)
1652{
1653 struct trace_selftests *selftest;
1654
1655 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1656 if (!selftest)
1657 return -ENOMEM;
1658
1659 selftest->type = type;
1660 list_add(&selftest->list, &postponed_selftests);
1661 return 0;
1662}
1663
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001664static int run_tracer_selftest(struct tracer *type)
1665{
1666 struct trace_array *tr = &global_trace;
1667 struct tracer *saved_tracer = tr->current_trace;
1668 int ret;
1669
1670 if (!type->selftest || tracing_selftest_disabled)
1671 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001672
1673 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001674 * If a tracer registers early in boot up (before scheduling is
1675 * initialized and such), then do not run its selftests yet.
1676 * Instead, run them a little later in the boot process.
1677 */
1678 if (!selftests_can_run)
1679 return save_selftest(type);
1680
1681 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001682 * Run a selftest on this tracer.
1683 * Here we reset the trace buffer, and set the current
1684 * tracer to be this tracer. The tracer can then run some
1685 * internal tracing to verify that everything is in order.
1686 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001687 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001688 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001689
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001690 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001691
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001692#ifdef CONFIG_TRACER_MAX_TRACE
1693 if (type->use_max_tr) {
1694 /* If we expanded the buffers, make sure the max is expanded too */
1695 if (ring_buffer_expanded)
1696 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1697 RING_BUFFER_ALL_CPUS);
1698 tr->allocated_snapshot = true;
1699 }
1700#endif
1701
1702 /* the test is responsible for initializing and enabling */
1703 pr_info("Testing tracer %s: ", type->name);
1704 ret = type->selftest(type, tr);
1705 /* the test is responsible for resetting too */
1706 tr->current_trace = saved_tracer;
1707 if (ret) {
1708 printk(KERN_CONT "FAILED!\n");
1709 /* Add the warning after printing 'FAILED' */
1710 WARN_ON(1);
1711 return -1;
1712 }
1713 /* Only reset on passing, to avoid touching corrupted buffers */
1714 tracing_reset_online_cpus(&tr->trace_buffer);
1715
1716#ifdef CONFIG_TRACER_MAX_TRACE
1717 if (type->use_max_tr) {
1718 tr->allocated_snapshot = false;
1719
1720 /* Shrink the max buffer again */
1721 if (ring_buffer_expanded)
1722 ring_buffer_resize(tr->max_buffer.buffer, 1,
1723 RING_BUFFER_ALL_CPUS);
1724 }
1725#endif
1726
1727 printk(KERN_CONT "PASSED\n");
1728 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001729}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001730
1731static __init int init_trace_selftests(void)
1732{
1733 struct trace_selftests *p, *n;
1734 struct tracer *t, **last;
1735 int ret;
1736
1737 selftests_can_run = true;
1738
1739 mutex_lock(&trace_types_lock);
1740
1741 if (list_empty(&postponed_selftests))
1742 goto out;
1743
1744 pr_info("Running postponed tracer tests:\n");
1745
1746 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001747 /* This loop can take minutes when sanitizers are enabled, so
1748		 * let's make sure we allow RCU processing.
1749 */
1750 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001751 ret = run_tracer_selftest(p->type);
1752 /* If the test fails, then warn and remove from available_tracers */
1753 if (ret < 0) {
1754 WARN(1, "tracer: %s failed selftest, disabling\n",
1755 p->type->name);
1756 last = &trace_types;
1757 for (t = trace_types; t; t = t->next) {
1758 if (t == p->type) {
1759 *last = t->next;
1760 break;
1761 }
1762 last = &t->next;
1763 }
1764 }
1765 list_del(&p->list);
1766 kfree(p);
1767 }
1768
1769 out:
1770 mutex_unlock(&trace_types_lock);
1771
1772 return 0;
1773}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001774core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001775#else
1776static inline int run_tracer_selftest(struct tracer *type)
1777{
1778 return 0;
1779}
1780#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001781
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001782static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1783
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001784static void __init apply_trace_boot_options(void);
1785
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001786/**
1787 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001788 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001789 *
1790 * Register a new plugin tracer.
1791 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001792int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001793{
1794 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001795 int ret = 0;
1796
1797 if (!type->name) {
1798 pr_info("Tracer must have a name\n");
1799 return -1;
1800 }
1801
Dan Carpenter24a461d2010-07-10 12:06:44 +02001802 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001803 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1804 return -1;
1805 }
1806
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001807 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001808
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001809 tracing_selftest_running = true;
1810
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001811 for (t = trace_types; t; t = t->next) {
1812 if (strcmp(type->name, t->name) == 0) {
1813 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001814 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001815 type->name);
1816 ret = -1;
1817 goto out;
1818 }
1819 }
1820
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001821 if (!type->set_flag)
1822 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001823 if (!type->flags) {
1824		/* allocate a dummy tracer_flags */
1825 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001826 if (!type->flags) {
1827 ret = -ENOMEM;
1828 goto out;
1829 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001830 type->flags->val = 0;
1831 type->flags->opts = dummy_tracer_opt;
1832 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001833 if (!type->flags->opts)
1834 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001835
Chunyu Hud39cdd22016-03-08 21:37:01 +08001836 /* store the tracer for __set_tracer_option */
1837 type->flags->trace = type;
1838
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001839 ret = run_tracer_selftest(type);
1840 if (ret < 0)
1841 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001842
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001843 type->next = trace_types;
1844 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001845 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001846
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001847 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001848 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001849 mutex_unlock(&trace_types_lock);
1850
Steven Rostedtdac74942009-02-05 01:13:38 -05001851 if (ret || !default_bootup_tracer)
1852 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001853
Li Zefanee6c2c12009-09-18 14:06:47 +08001854 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001855 goto out_unlock;
1856
1857 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1858 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001859 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001860 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001861
1862 apply_trace_boot_options();
1863
Steven Rostedtdac74942009-02-05 01:13:38 -05001864 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001865 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001866#ifdef CONFIG_FTRACE_STARTUP_TEST
1867 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1868 type->name);
1869#endif
1870
1871 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001872 return ret;
1873}
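/*
 * Minimal registration sketch (a hypothetical "example" tracer; only
 * .name is mandatory, missing flags and options are defaulted by the
 * code above). The struct must stay around, as the trace_types list
 * keeps a pointer to it:
 *
 *	static struct tracer example_tracer = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */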
1874
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04001875static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001876{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001877 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001878
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001879 if (!buffer)
1880 return;
1881
Steven Rostedtf6339032009-09-04 12:35:16 -04001882 ring_buffer_record_disable(buffer);
1883
1884 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001885 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001886 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001887
1888 ring_buffer_record_enable(buffer);
1889}
1890
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001891void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001892{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001893 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001894 int cpu;
1895
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001896 if (!buffer)
1897 return;
1898
Steven Rostedt621968c2009-09-04 12:02:35 -04001899 ring_buffer_record_disable(buffer);
1900
1901 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001902 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04001903
Alexander Z Lam94571582013-08-02 18:36:16 -07001904 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001905
1906 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001907 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001908
1909 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001910}
1911
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001912/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001913void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001914{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001915 struct trace_array *tr;
1916
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001917 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001918 if (!tr->clear_trace)
1919 continue;
1920 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001921 tracing_reset_online_cpus(&tr->trace_buffer);
1922#ifdef CONFIG_TRACER_MAX_TRACE
1923 tracing_reset_online_cpus(&tr->max_buffer);
1924#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001925 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001926}
1927
Joel Fernandesd914ba32017-06-26 19:01:55 -07001928static int *tgid_map;
1929
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001930#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001931#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001932static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001933struct saved_cmdlines_buffer {
1934 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1935 unsigned *map_cmdline_to_pid;
1936 unsigned cmdline_num;
1937 int cmdline_idx;
1938 char *saved_cmdlines;
1939};
1940static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001941
Steven Rostedt25b0b442008-05-12 21:21:00 +02001942/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001943static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001944
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001945static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001946{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001947 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1948}
1949
1950static inline void set_cmdline(int idx, const char *cmdline)
1951{
Tom Zanussi85f726a2019-03-05 10:12:00 -06001952 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001953}
1954
1955static int allocate_cmdlines_buffer(unsigned int val,
1956 struct saved_cmdlines_buffer *s)
1957{
Kees Cook6da2ec52018-06-12 13:55:00 -07001958 s->map_cmdline_to_pid = kmalloc_array(val,
1959 sizeof(*s->map_cmdline_to_pid),
1960 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001961 if (!s->map_cmdline_to_pid)
1962 return -ENOMEM;
1963
Kees Cook6da2ec52018-06-12 13:55:00 -07001964 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001965 if (!s->saved_cmdlines) {
1966 kfree(s->map_cmdline_to_pid);
1967 return -ENOMEM;
1968 }
1969
1970 s->cmdline_idx = 0;
1971 s->cmdline_num = val;
1972 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1973 sizeof(s->map_pid_to_cmdline));
1974 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1975 val * sizeof(*s->map_cmdline_to_pid));
1976
1977 return 0;
1978}
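/*
 * Sizing example (worked from the defaults above): with
 * SAVED_CMDLINES_DEFAULT (128) entries and TASK_COMM_LEN (16),
 * saved_cmdlines needs 128 * 16 = 2048 bytes and map_cmdline_to_pid
 * needs 128 * sizeof(unsigned) = 512 bytes; the fixed
 * map_pid_to_cmdline[PID_MAX_DEFAULT + 1] array in the struct itself
 * dominates at roughly 128 KiB.
 */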
1979
1980static int trace_create_savedcmd(void)
1981{
1982 int ret;
1983
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001984 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001985 if (!savedcmd)
1986 return -ENOMEM;
1987
1988 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1989 if (ret < 0) {
1990 kfree(savedcmd);
1991 savedcmd = NULL;
1992 return -ENOMEM;
1993 }
1994
1995 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001996}
1997
Carsten Emdeb5130b12009-09-13 01:43:07 +02001998int is_tracing_stopped(void)
1999{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002000 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002001}
2002
Steven Rostedt0f048702008-11-05 16:05:44 -05002003/**
2004 * tracing_start - quick start of the tracer
2005 *
2006 * If tracing is enabled but was stopped by tracing_stop,
2007 * this will start the tracer back up.
2008 */
2009void tracing_start(void)
2010{
2011 struct ring_buffer *buffer;
2012 unsigned long flags;
2013
2014 if (tracing_disabled)
2015 return;
2016
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002017 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2018 if (--global_trace.stop_count) {
2019 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002020 /* Someone screwed up their debugging */
2021 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002022 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002023 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002024 goto out;
2025 }
2026
Steven Rostedta2f80712010-03-12 19:56:00 -05002027 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002028 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002029
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002030 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002031 if (buffer)
2032 ring_buffer_record_enable(buffer);
2033
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002034#ifdef CONFIG_TRACER_MAX_TRACE
2035 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002036 if (buffer)
2037 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002038#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002039
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002040 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002041
Steven Rostedt0f048702008-11-05 16:05:44 -05002042 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002043 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2044}
2045
2046static void tracing_start_tr(struct trace_array *tr)
2047{
2048 struct ring_buffer *buffer;
2049 unsigned long flags;
2050
2051 if (tracing_disabled)
2052 return;
2053
2054 /* If global, we need to also start the max tracer */
2055 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2056 return tracing_start();
2057
2058 raw_spin_lock_irqsave(&tr->start_lock, flags);
2059
2060 if (--tr->stop_count) {
2061 if (tr->stop_count < 0) {
2062 /* Someone screwed up their debugging */
2063 WARN_ON_ONCE(1);
2064 tr->stop_count = 0;
2065 }
2066 goto out;
2067 }
2068
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002069 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002070 if (buffer)
2071 ring_buffer_record_enable(buffer);
2072
2073 out:
2074 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002075}
2076
2077/**
2078 * tracing_stop - quick stop of the tracer
2079 *
2080 * Light weight way to stop tracing. Use in conjunction with
2081 * tracing_start.
2082 */
2083void tracing_stop(void)
2084{
2085 struct ring_buffer *buffer;
2086 unsigned long flags;
2087
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002088 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2089 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002090 goto out;
2091
Steven Rostedta2f80712010-03-12 19:56:00 -05002092 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002093 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002094
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002095 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002096 if (buffer)
2097 ring_buffer_record_disable(buffer);
2098
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002099#ifdef CONFIG_TRACER_MAX_TRACE
2100 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002101 if (buffer)
2102 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002103#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002104
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002105 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002106
Steven Rostedt0f048702008-11-05 16:05:44 -05002107 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002108 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2109}
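/*
 * Usage sketch (an assumed debugging pattern, not from this file;
 * inspect_trace_buffers() is a hypothetical helper): stop_count makes
 * the pair nest, so a dump path can freeze the buffers, inspect them
 * and resume without disturbing an outer stop:
 *
 *	tracing_stop();
 *	inspect_trace_buffers();
 *	tracing_start();
 */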
2110
2111static void tracing_stop_tr(struct trace_array *tr)
2112{
2113 struct ring_buffer *buffer;
2114 unsigned long flags;
2115
2116 /* If global, we need to also stop the max tracer */
2117 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2118 return tracing_stop();
2119
2120 raw_spin_lock_irqsave(&tr->start_lock, flags);
2121 if (tr->stop_count++)
2122 goto out;
2123
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002124 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002125 if (buffer)
2126 ring_buffer_record_disable(buffer);
2127
2128 out:
2129 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002130}
2131
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002132static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133{
Carsten Emdea635cf02009-03-18 09:00:41 +01002134 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002135
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002136 /* treat recording of idle task as a success */
2137 if (!tsk->pid)
2138 return 1;
2139
2140 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002141 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002142
2143 /*
2144 * It's not the end of the world if we don't get
2145 * the lock, but we also don't want to spin
2146 * nor do we want to disable interrupts,
2147 * so if we miss here, then better luck next time.
2148 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002149 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002150 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002151
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002152 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002153 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002154 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002155
Carsten Emdea635cf02009-03-18 09:00:41 +01002156 /*
2157 * Check whether the cmdline buffer at idx has a pid
2158 * mapped. We are going to overwrite that entry so we
2159 * need to clear the map_pid_to_cmdline. Otherwise we
2160 * would read the new comm for the old pid.
2161 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002162 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002163 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002164 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002165
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002166 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2167 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002168
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002169 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002170 }
2171
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002172 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002173
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002174 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002175
2176 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002177}
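/*
 * Worked example (illustrative): suppose pid 1000 was saved in slot 5,
 * so map_pid_to_cmdline[1000] == 5 and map_cmdline_to_pid[5] == 1000.
 * When cmdline_idx wraps around and slot 5 is handed to pid 3000, the
 * code above first resets map_pid_to_cmdline[1000] to NO_CMDLINE_MAP;
 * a later lookup of pid 1000 then prints "<...>" rather than wrongly
 * returning pid 3000's comm.
 */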
2178
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002179static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002180{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002181 unsigned map;
2182
Steven Rostedt4ca530852009-03-16 19:20:15 -04002183 if (!pid) {
2184 strcpy(comm, "<idle>");
2185 return;
2186 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002187
Steven Rostedt74bf4072010-01-25 15:11:53 -05002188 if (WARN_ON_ONCE(pid < 0)) {
2189 strcpy(comm, "<XXX>");
2190 return;
2191 }
2192
Steven Rostedt4ca530852009-03-16 19:20:15 -04002193 if (pid > PID_MAX_DEFAULT) {
2194 strcpy(comm, "<...>");
2195 return;
2196 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002197
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002198 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002199 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302200 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002201 else
2202 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002203}
2204
2205void trace_find_cmdline(int pid, char comm[])
2206{
2207 preempt_disable();
2208 arch_spin_lock(&trace_cmdline_lock);
2209
2210 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002211
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002212 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002213 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002214}
2215
Joel Fernandesd914ba32017-06-26 19:01:55 -07002216int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002217{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002218 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2219 return 0;
2220
2221 return tgid_map[pid];
2222}
2223
2224static int trace_save_tgid(struct task_struct *tsk)
2225{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002226 /* treat recording of idle task as a success */
2227 if (!tsk->pid)
2228 return 1;
2229
2230 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002231 return 0;
2232
2233 tgid_map[tsk->pid] = tsk->tgid;
2234 return 1;
2235}
2236
2237static bool tracing_record_taskinfo_skip(int flags)
2238{
2239 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2240 return true;
2241 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2242 return true;
2243 if (!__this_cpu_read(trace_taskinfo_save))
2244 return true;
2245 return false;
2246}
2247
2248/**
2249 * tracing_record_taskinfo - record the task info of a task
2250 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002251 * @task: task to record
2252 * @flags: TRACE_RECORD_CMDLINE for recording comm
2253 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002254 */
2255void tracing_record_taskinfo(struct task_struct *task, int flags)
2256{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002257 bool done;
2258
Joel Fernandesd914ba32017-06-26 19:01:55 -07002259 if (tracing_record_taskinfo_skip(flags))
2260 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002261
2262 /*
2263 * Record as much task information as possible. If some fail, continue
2264 * to try to record the others.
2265 */
2266 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2267 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2268
2269 /* If recording any information failed, retry again soon. */
2270 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002271 return;
2272
Joel Fernandesd914ba32017-06-26 19:01:55 -07002273 __this_cpu_write(trace_taskinfo_save, false);
2274}
2275
2276/**
2277 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2278 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002279 * @prev: previous task during sched_switch
2280 * @next: next task during sched_switch
2281 * @flags: TRACE_RECORD_CMDLINE for recording comm
2282 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002283 */
2284void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2285 struct task_struct *next, int flags)
2286{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002287 bool done;
2288
Joel Fernandesd914ba32017-06-26 19:01:55 -07002289 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002290 return;
2291
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002292 /*
2293 * Record as much task information as possible. If some fail, continue
2294 * to try to record the others.
2295 */
2296 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2297 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2298 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2299 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002300
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002301 /* If recording any information failed, retry again soon. */
2302 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002303 return;
2304
2305 __this_cpu_write(trace_taskinfo_save, false);
2306}
2307
2308/* Helpers to record a specific task information */
2309void tracing_record_cmdline(struct task_struct *task)
2310{
2311 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2312}
2313
2314void tracing_record_tgid(struct task_struct *task)
2315{
2316 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002317}
2318
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002319/*
2320 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2321 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2322 * simplifies those functions and keeps them in sync.
2323 */
2324enum print_line_t trace_handle_return(struct trace_seq *s)
2325{
2326 return trace_seq_has_overflowed(s) ?
2327 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2328}
2329EXPORT_SYMBOL_GPL(trace_handle_return);
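/*
 * Typical use in an event's output callback (a sketch; the event name
 * and format are assumptions, the callback signature is the standard
 * struct trace_event_functions ->trace() one):
 *
 *	static enum print_line_t
 *	example_print(struct trace_iterator *iter, int flags,
 *		      struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */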
2330
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002331void
Cong Wang46710f32019-05-25 09:57:59 -07002332tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2333 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002334{
2335 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002336
Steven Rostedt777e2082008-09-29 23:02:42 -04002337 entry->preempt_count = pc & 0xff;
2338 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002339 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002340 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002341#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002342 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002343#else
2344 TRACE_FLAG_IRQS_NOSUPPORT |
2345#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002346 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002347 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302348 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002349 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2350 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002351}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002352EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353
Steven Rostedte77405a2009-09-02 14:17:06 -04002354struct ring_buffer_event *
2355trace_buffer_lock_reserve(struct ring_buffer *buffer,
2356 int type,
2357 unsigned long len,
2358 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002359{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002360 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002361}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002362
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002363DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2364DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2365static int trace_buffered_event_ref;
2366
2367/**
2368 * trace_buffered_event_enable - enable buffering events
2369 *
2370 * When events are being filtered, it is quicker to use a temporary
2371 * buffer to write the event data into if there's a likely chance
2372 * that it will not be committed. The discard of the ring buffer
2373 * is not as fast as committing, and is much slower than copying
2374 * a commit.
2375 *
2376 * When an event is to be filtered, allocate per cpu buffers to
2377 * write the event data into, and if the event is filtered and discarded
2378 * it is simply dropped, otherwise, the entire data is to be committed
2379 * in one shot.
2380 */
2381void trace_buffered_event_enable(void)
2382{
2383 struct ring_buffer_event *event;
2384 struct page *page;
2385 int cpu;
2386
2387 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2388
2389 if (trace_buffered_event_ref++)
2390 return;
2391
2392 for_each_tracing_cpu(cpu) {
2393 page = alloc_pages_node(cpu_to_node(cpu),
2394 GFP_KERNEL | __GFP_NORETRY, 0);
2395 if (!page)
2396 goto failed;
2397
2398 event = page_address(page);
2399 memset(event, 0, sizeof(*event));
2400
2401 per_cpu(trace_buffered_event, cpu) = event;
2402
2403 preempt_disable();
2404 if (cpu == smp_processor_id() &&
2405 this_cpu_read(trace_buffered_event) !=
2406 per_cpu(trace_buffered_event, cpu))
2407 WARN_ON_ONCE(1);
2408 preempt_enable();
2409 }
2410
2411 return;
2412 failed:
2413 trace_buffered_event_disable();
2414}
2415
2416static void enable_trace_buffered_event(void *data)
2417{
2418 /* Probably not needed, but do it anyway */
2419 smp_rmb();
2420 this_cpu_dec(trace_buffered_event_cnt);
2421}
2422
2423static void disable_trace_buffered_event(void *data)
2424{
2425 this_cpu_inc(trace_buffered_event_cnt);
2426}
2427
2428/**
2429 * trace_buffered_event_disable - disable buffering events
2430 *
2431 * When a filter is removed, it is faster to not use the buffered
2432 * events, and to commit directly into the ring buffer. Free up
2433 * the temp buffers when there are no more users. This requires
2434 * special synchronization with current events.
2435 */
2436void trace_buffered_event_disable(void)
2437{
2438 int cpu;
2439
2440 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2441
2442 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2443 return;
2444
2445 if (--trace_buffered_event_ref)
2446 return;
2447
2448 preempt_disable();
2449 /* For each CPU, set the buffer as used. */
2450 smp_call_function_many(tracing_buffer_mask,
2451 disable_trace_buffered_event, NULL, 1);
2452 preempt_enable();
2453
2454 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002455 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002456
2457 for_each_tracing_cpu(cpu) {
2458 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2459 per_cpu(trace_buffered_event, cpu) = NULL;
2460 }
2461 /*
2462 * Make sure trace_buffered_event is NULL before clearing
2463 * trace_buffered_event_cnt.
2464 */
2465 smp_wmb();
2466
2467 preempt_disable();
2468 /* Do the work on each cpu */
2469 smp_call_function_many(tracing_buffer_mask,
2470 enable_trace_buffered_event, NULL, 1);
2471 preempt_enable();
2472}
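/*
 * Pairing sketch (modeled on the event filter code, the assumed
 * caller): the reference count lets multiple filters share the
 * per-CPU buffers, and event_mutex must be held, as the
 * WARN_ON_ONCE() checks above require:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		when attaching a filter
 *	...
 *	trace_buffered_event_disable();		when removing it
 *	mutex_unlock(&event_mutex);
 */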
2473
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002474static struct ring_buffer *temp_buffer;
2475
Steven Rostedtef5580d2009-02-27 19:38:04 -05002476struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002477trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002478 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002479 int type, unsigned long len,
2480 unsigned long flags, int pc)
2481{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002482 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002483 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002484
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002485 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002486
Tom Zanussi00b41452018-01-15 20:51:39 -06002487 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002488 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2489 (entry = this_cpu_read(trace_buffered_event))) {
2490 /* Try to use the per cpu buffer first */
2491 val = this_cpu_inc_return(trace_buffered_event_cnt);
2492 if (val == 1) {
2493 trace_event_setup(entry, type, flags, pc);
2494 entry->array[0] = len;
2495 return entry;
2496 }
2497 this_cpu_dec(trace_buffered_event_cnt);
2498 }
2499
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002500 entry = __trace_buffer_lock_reserve(*current_rb,
2501 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002502 /*
2503	 * If tracing is off, but we have triggers enabled,
2504	 * we still need to look at the event data. Use the temp_buffer
2505	 * to store the trace event for the trigger to use. It's recursion
2506	 * safe and will not be recorded anywhere.
2507 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002508 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002509 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002510 entry = __trace_buffer_lock_reserve(*current_rb,
2511 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002512 }
2513 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002514}
2515EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2516
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002517static DEFINE_SPINLOCK(tracepoint_iter_lock);
2518static DEFINE_MUTEX(tracepoint_printk_mutex);
2519
2520static void output_printk(struct trace_event_buffer *fbuffer)
2521{
2522 struct trace_event_call *event_call;
2523 struct trace_event *event;
2524 unsigned long flags;
2525 struct trace_iterator *iter = tracepoint_print_iter;
2526
2527 /* We should never get here if iter is NULL */
2528 if (WARN_ON_ONCE(!iter))
2529 return;
2530
2531 event_call = fbuffer->trace_file->event_call;
2532 if (!event_call || !event_call->event.funcs ||
2533 !event_call->event.funcs->trace)
2534 return;
2535
2536 event = &fbuffer->trace_file->event_call->event;
2537
2538 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2539 trace_seq_init(&iter->seq);
2540 iter->ent = fbuffer->entry;
2541 event_call->event.funcs->trace(iter, 0, event);
2542 trace_seq_putc(&iter->seq, 0);
2543 printk("%s", iter->seq.buffer);
2544
2545 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2546}
2547
2548int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2549 void __user *buffer, size_t *lenp,
2550 loff_t *ppos)
2551{
2552 int save_tracepoint_printk;
2553 int ret;
2554
2555 mutex_lock(&tracepoint_printk_mutex);
2556 save_tracepoint_printk = tracepoint_printk;
2557
2558 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2559
2560 /*
2561 * This will force exiting early, as tracepoint_printk
2562 * is always zero when tracepoint_printk_iter is not allocated
2563 */
2564 if (!tracepoint_print_iter)
2565 tracepoint_printk = 0;
2566
2567 if (save_tracepoint_printk == tracepoint_printk)
2568 goto out;
2569
2570 if (tracepoint_printk)
2571 static_key_enable(&tracepoint_printk_key.key);
2572 else
2573 static_key_disable(&tracepoint_printk_key.key);
2574
2575 out:
2576 mutex_unlock(&tracepoint_printk_mutex);
2577
2578 return ret;
2579}
2580
2581void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2582{
2583 if (static_key_false(&tracepoint_printk_key.key))
2584 output_printk(fbuffer);
2585
2586 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2587 fbuffer->event, fbuffer->entry,
2588 fbuffer->flags, fbuffer->pc);
2589}
2590EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2591
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002592/*
2593 * Skip 3:
2594 *
2595 * trace_buffer_unlock_commit_regs()
2596 * trace_event_buffer_commit()
2597 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302598 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002599# define STACK_SKIP 3
2600
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002601void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2602 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002603 struct ring_buffer_event *event,
2604 unsigned long flags, int pc,
2605 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002606{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002607 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002608
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002609 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002610 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002611 * Note, we can still get here via blktrace, wakeup tracer
2612 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002613 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002614 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002615 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002616 ftrace_trace_userstack(buffer, flags, pc);
2617}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002618
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002619/*
2620 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2621 */
2622void
2623trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2624 struct ring_buffer_event *event)
2625{
2626 __buffer_unlock_commit(buffer, event);
2627}
2628
Chunyan Zhang478409d2016-11-21 15:57:18 +08002629static void
2630trace_process_export(struct trace_export *export,
2631 struct ring_buffer_event *event)
2632{
2633 struct trace_entry *entry;
2634 unsigned int size = 0;
2635
2636 entry = ring_buffer_event_data(event);
2637 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002638 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002639}
2640
2641static DEFINE_MUTEX(ftrace_export_lock);
2642
2643static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2644
2645static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2646
2647static inline void ftrace_exports_enable(void)
2648{
2649 static_branch_enable(&ftrace_exports_enabled);
2650}
2651
2652static inline void ftrace_exports_disable(void)
2653{
2654 static_branch_disable(&ftrace_exports_enabled);
2655}
2656
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002657static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002658{
2659 struct trace_export *export;
2660
2661 preempt_disable_notrace();
2662
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002663 export = rcu_dereference_raw_check(ftrace_exports_list);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002664 while (export) {
2665 trace_process_export(export, event);
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002666 export = rcu_dereference_raw_check(export->next);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002667 }
2668
2669 preempt_enable_notrace();
2670}
2671
2672static inline void
2673add_trace_export(struct trace_export **list, struct trace_export *export)
2674{
2675 rcu_assign_pointer(export->next, *list);
2676 /*
2677 * We are entering export into the list but another
2678 * CPU might be walking that list. We need to make sure
2679 * the export->next pointer is valid before another CPU sees
2680 * the export pointer included into the list.
2681 */
2682 rcu_assign_pointer(*list, export);
2683}
2684
2685static inline int
2686rm_trace_export(struct trace_export **list, struct trace_export *export)
2687{
2688 struct trace_export **p;
2689
2690 for (p = list; *p != NULL; p = &(*p)->next)
2691 if (*p == export)
2692 break;
2693
2694 if (*p != export)
2695 return -1;
2696
2697 rcu_assign_pointer(*p, (*p)->next);
2698
2699 return 0;
2700}
2701
2702static inline void
2703add_ftrace_export(struct trace_export **list, struct trace_export *export)
2704{
2705 if (*list == NULL)
2706 ftrace_exports_enable();
2707
2708 add_trace_export(list, export);
2709}
2710
2711static inline int
2712rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2713{
2714 int ret;
2715
2716 ret = rm_trace_export(list, export);
2717 if (*list == NULL)
2718 ftrace_exports_disable();
2719
2720 return ret;
2721}
2722
2723int register_ftrace_export(struct trace_export *export)
2724{
2725 if (WARN_ON_ONCE(!export->write))
2726 return -1;
2727
2728 mutex_lock(&ftrace_export_lock);
2729
2730 add_ftrace_export(&ftrace_exports_list, export);
2731
2732 mutex_unlock(&ftrace_export_lock);
2733
2734 return 0;
2735}
2736EXPORT_SYMBOL_GPL(register_ftrace_export);
2737
2738int unregister_ftrace_export(struct trace_export *export)
2739{
2740 int ret;
2741
2742 mutex_lock(&ftrace_export_lock);
2743
2744 ret = rm_ftrace_export(&ftrace_exports_list, export);
2745
2746 mutex_unlock(&ftrace_export_lock);
2747
2748 return ret;
2749}
2750EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2751
Ingo Molnare309b412008-05-12 21:20:51 +02002752void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002753trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002754 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2755 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002756{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002757 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002758 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002759 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002760 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002761
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002762 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2763 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002764 if (!event)
2765 return;
2766 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002767 entry->ip = ip;
2768 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002769
Chunyan Zhang478409d2016-11-21 15:57:18 +08002770 if (!call_filter_check_discard(call, entry, buffer, event)) {
2771 if (static_branch_unlikely(&ftrace_exports_enabled))
2772 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002773 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002774 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002775}
2776
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002777#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002778
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002779/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2780#define FTRACE_KSTACK_NESTING 4
2781
2782#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2783
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002784struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002785 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002786};
2787
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002788
2789struct ftrace_stacks {
2790 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2791};
2792
2793static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002794static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2795
Steven Rostedte77405a2009-09-02 14:17:06 -04002796static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002797 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002798 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002799{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002800 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002801 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002802 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002803 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002804 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002805 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002806
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002807 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002808	 * Add one to skip this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002809	 * If regs is set, then these functions will not be in the way.
2810 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002811#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002812 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002813 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002814#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002815
2816 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002817	 * Since events can happen in NMIs there's no safe way to
2818	 * use the per cpu ftrace_stacks without reserving one. If an
2819	 * interrupt or NMI comes in while a slot is reserved, that
2820	 * context just uses the next of the FTRACE_KSTACK_NESTING slots.
2821 */
2822 preempt_disable_notrace();
2823
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002824 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2825
2826 /* This should never happen. If it does, yell once and skip */
2827	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2828 goto out;
2829
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002830 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002831	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2832	 * interrupt will either see the value before or after the increment.
2833	 * If the interrupt happens before the increment, it will have
2834	 * restored the counter when it returns. We just need a barrier to
2835	 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002836 */
2837 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002838
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002839 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002840 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002841
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002842 if (regs) {
2843 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2844 size, skip);
2845 } else {
2846 nr_entries = stack_trace_save(fstack->calls, size, skip);
2847 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002848
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002849 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002850 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2851 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002852 if (!event)
2853 goto out;
2854 entry = ring_buffer_event_data(event);
2855
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002856 memcpy(&entry->caller, fstack->calls, size);
2857 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002858
Tom Zanussif306cc82013-10-24 08:34:17 -05002859 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002860 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002861
2862 out:
2863 /* Again, don't let gcc optimize things here */
2864 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002865 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002866 preempt_enable_notrace();
2867
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002868}
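
/*
 * Editor's sketch of the reservation scheme used above, reduced to its
 * core. All names here are illustrative, not kernel API:
 *
 *	struct my_stacks { unsigned long calls[4][64]; };
 *	static DEFINE_PER_CPU(struct my_stacks, my_stacks);
 *	static DEFINE_PER_CPU(int, my_reserve);
 *
 *	static unsigned long *my_get_slot(void)
 *	{
 *		int idx;
 *
 *		preempt_disable_notrace();
 *		idx = __this_cpu_inc_return(my_reserve) - 1;
 *		if (idx >= 4) {
 *			__this_cpu_dec(my_reserve);
 *			preempt_enable_notrace();
 *			return NULL;
 *		}
 *		barrier();
 *		return this_cpu_ptr(&my_stacks)->calls[idx];
 *	}
 *
 * A my_put_slot() counterpart (barrier(); __this_cpu_dec(my_reserve);
 * preempt_enable_notrace();) releases the slot. An interrupt or NMI
 * that arrives after the increment sees the bumped counter and takes
 * the next slot, so nested contexts never share stack storage.
 */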
2869
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002870static inline void ftrace_trace_stack(struct trace_array *tr,
2871 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002872 unsigned long flags,
2873 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002874{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002875 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002876 return;
2877
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002878 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002879}
2880
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002881void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2882 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002883{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002884 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2885
2886 if (rcu_is_watching()) {
2887 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2888 return;
2889 }
2890
2891 /*
2892 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2893 * but if the above rcu_is_watching() failed, then the NMI
2894 * triggered someplace critical, and rcu_irq_enter() should
2895 * not be called from NMI.
2896 */
2897 if (unlikely(in_nmi()))
2898 return;
2899
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002900 rcu_irq_enter_irqson();
2901 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2902 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002903}
2904
Steven Rostedt03889382009-12-11 09:48:22 -05002905/**
2906 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002907 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002908 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002909void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002910{
2911 unsigned long flags;
2912
2913 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002914 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002915
2916 local_save_flags(flags);
2917
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002918#ifndef CONFIG_UNWINDER_ORC
2919 /* Skip 1 to skip this function. */
2920 skip++;
2921#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002922 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2923 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002924}
Nikolay Borisovda387e52018-10-17 09:51:43 +03002925EXPORT_SYMBOL_GPL(trace_dump_stack);
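
/*
 * Example (editor's note): to record which path reached a suspect
 * callback, place this in the callback itself:
 *
 *	trace_dump_stack(0);
 *
 * A positive @skip drops that many additional callers from the top of
 * the recorded trace, which helps when the call site is buried inside
 * a helper function.
 */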
Steven Rostedt03889382009-12-11 09:48:22 -05002926
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002927#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01002928static DEFINE_PER_CPU(int, user_stack_count);
2929
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002930static void
Steven Rostedte77405a2009-09-02 14:17:06 -04002931ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002932{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002933 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002934 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002935 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02002936
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002937 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002938 return;
2939
Steven Rostedtb6345872010-03-12 20:03:30 -05002940 /*
2941	 * NMIs cannot handle page faults, even with fixups.
2942	 * Saving the user stack can (and often does) fault.
2943 */
2944 if (unlikely(in_nmi()))
2945 return;
2946
Steven Rostedt91e86e52010-11-10 12:56:12 +01002947 /*
2948 * prevent recursion, since the user stack tracing may
2949 * trigger other kernel events.
2950 */
2951 preempt_disable();
2952 if (__this_cpu_read(user_stack_count))
2953 goto out;
2954
2955 __this_cpu_inc(user_stack_count);
2956
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002957 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2958 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002959 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002960 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002961 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002962
Steven Rostedt48659d32009-09-11 11:36:23 -04002963 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002964 memset(&entry->caller, 0, sizeof(entry->caller));
2965
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002966 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05002967 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002968 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002969
Li Zefan1dbd1952010-12-09 15:47:56 +08002970 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002971 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002972 out:
2973 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002974}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002975#else /* CONFIG_USER_STACKTRACE_SUPPORT */
2976static void ftrace_trace_userstack(struct ring_buffer *buffer,
2977 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002978{
Török Edwin02b67512008-11-22 13:28:47 +02002979}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002980#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02002981
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002982#endif /* CONFIG_STACKTRACE */
2983
Steven Rostedt07d777f2011-09-22 14:01:55 -04002984/* created for use with alloc_percpu */
2985struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002986 int nesting;
2987 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002988};
2989
2990static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002991
2992/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002993 * This allows for lockless recording. If we're nested too deeply, then
2994 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002995 */
2996static char *get_trace_buf(void)
2997{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002998 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002999
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003000 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003001 return NULL;
3002
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003003 buffer->nesting++;
3004
3005 /* Interrupts must see nesting incremented before we use the buffer */
3006 barrier();
3007 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003008}
3009
3010static void put_trace_buf(void)
3011{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003012 /* Don't let the decrement of nesting leak before this */
3013 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003014 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003015}
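
/*
 * Editor's note: the contract for these two helpers, as followed by the
 * trace_vbprintk()/__trace_array_vprintk() callers further below:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	...format at most TRACE_BUF_SIZE bytes into tbuffer...
 *	put_trace_buf();
 * out_nobuffer:
 *	preempt_enable_notrace();
 *
 * Preemption must stay disabled between get and put so the per-CPU
 * nesting counter is decremented on the same CPU that incremented it.
 */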
3016
3017static int alloc_percpu_trace_buffer(void)
3018{
3019 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003020
3021 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003022 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3023 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003024
3025 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003026 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003027}
3028
Steven Rostedt81698832012-10-11 10:15:05 -04003029static int buffers_allocated;
3030
Steven Rostedt07d777f2011-09-22 14:01:55 -04003031void trace_printk_init_buffers(void)
3032{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003033 if (buffers_allocated)
3034 return;
3035
3036 if (alloc_percpu_trace_buffer())
3037 return;
3038
Steven Rostedt2184db42014-05-28 13:14:40 -04003039 /* trace_printk() is for debug use only. Don't use it in production. */
3040
Joe Perchesa395d6a2016-03-22 14:28:09 -07003041 pr_warn("\n");
3042 pr_warn("**********************************************************\n");
3043 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3044 pr_warn("** **\n");
3045 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3046 pr_warn("** **\n");
3047 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3048 pr_warn("** unsafe for production use. **\n");
3049 pr_warn("** **\n");
3050 pr_warn("** If you see this message and you are not debugging **\n");
3051 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3052 pr_warn("** **\n");
3053 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3054 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003055
Steven Rostedtb382ede62012-10-10 21:44:34 -04003056 /* Expand the buffers to set size */
3057 tracing_update_buffers();
3058
Steven Rostedt07d777f2011-09-22 14:01:55 -04003059 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003060
3061 /*
3062 * trace_printk_init_buffers() can be called by modules.
3063 * If that happens, then we need to start cmdline recording
3064	 * directly here. If global_trace.trace_buffer.buffer is already
3065	 * allocated, then this was called by module code.
3066 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003067 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003068 tracing_start_cmdline_record();
3069}
Divya Indif45d1222019-03-20 11:28:51 -07003070EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003071
3072void trace_printk_start_comm(void)
3073{
3074 /* Start tracing comms if trace printk is set */
3075 if (!buffers_allocated)
3076 return;
3077 tracing_start_cmdline_record();
3078}
3079
3080static void trace_printk_start_stop_comm(int enabled)
3081{
3082 if (!buffers_allocated)
3083 return;
3084
3085 if (enabled)
3086 tracing_start_cmdline_record();
3087 else
3088 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003089}
3090
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003091/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003092 * trace_vbprintk - write a binary message to the tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003093 * @ip: The address of the caller
3094 * @fmt: The string format to write to the buffer
3095 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003096 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003097int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003098{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003099 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003100 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04003101 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003102 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003103 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003104 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003105 char *tbuffer;
3106 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003107
3108 if (unlikely(tracing_selftest_running || tracing_disabled))
3109 return 0;
3110
3111 /* Don't pollute graph traces with trace_vprintk internals */
3112 pause_graph_tracing();
3113
3114 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003115 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003116
Steven Rostedt07d777f2011-09-22 14:01:55 -04003117 tbuffer = get_trace_buf();
3118 if (!tbuffer) {
3119 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003120 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003121 }
3122
3123 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3124
3125 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003126 goto out;
3127
Steven Rostedt07d777f2011-09-22 14:01:55 -04003128 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003129 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003130 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3132 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003133 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003134 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003135 entry = ring_buffer_event_data(event);
3136 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003137 entry->fmt = fmt;
3138
Steven Rostedt07d777f2011-09-22 14:01:55 -04003139 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003140 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003141 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003142 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003143 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003144
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003145out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003146 put_trace_buf();
3147
3148out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003149 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003150 unpause_graph_tracing();
3151
3152 return len;
3153}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003154EXPORT_SYMBOL_GPL(trace_vbprintk);
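
/*
 * Example (editor's note): trace_vbprintk() is normally reached through
 * the trace_printk() macro when the format string is a compile-time
 * constant:
 *
 *	trace_printk("processed %d packets on queue %d\n", cnt, qid);
 *
 * Only the format pointer and the binary arguments are stored in the
 * ring buffer; the text is rendered later when the trace is read, which
 * keeps this fast path cheap.
 */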
3155
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003156__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003157static int
3158__trace_array_vprintk(struct ring_buffer *buffer,
3159 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003160{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003161 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003162 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003163 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003164 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003165 unsigned long flags;
3166 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003167
3168 if (tracing_disabled || tracing_selftest_running)
3169 return 0;
3170
Steven Rostedt07d777f2011-09-22 14:01:55 -04003171 /* Don't pollute graph traces with trace_vprintk internals */
3172 pause_graph_tracing();
3173
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003174 pc = preempt_count();
3175 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003176
Steven Rostedt07d777f2011-09-22 14:01:55 -04003177
3178 tbuffer = get_trace_buf();
3179 if (!tbuffer) {
3180 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003181 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003182 }
3183
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003184 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003185
Steven Rostedt07d777f2011-09-22 14:01:55 -04003186 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003187 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003188 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3189 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003190 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003191 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003192 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003193 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003194
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003195 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003196 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003197 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003198 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003199 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003200
3201out:
3202 put_trace_buf();
3203
3204out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003205 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003206 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003207
3208 return len;
3209}
Steven Rostedt659372d2009-09-03 19:11:07 -04003210
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003211__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003212int trace_array_vprintk(struct trace_array *tr,
3213 unsigned long ip, const char *fmt, va_list args)
3214{
3215 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3216}
3217
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003218__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003219int trace_array_printk(struct trace_array *tr,
3220 unsigned long ip, const char *fmt, ...)
3221{
3222 int ret;
3223 va_list ap;
3224
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003225 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003226 return 0;
3227
3228 va_start(ap, fmt);
3229 ret = trace_array_vprintk(tr, ip, fmt, ap);
3230 va_end(ap);
3231 return ret;
3232}
Divya Indif45d1222019-03-20 11:28:51 -07003233EXPORT_SYMBOL_GPL(trace_array_printk);
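
/*
 * Example (editor's sketch): writing into a specific trace instance
 * from a module. This assumes trace_array_get_by_name() is available in
 * this tree; it was exported as part of the same kernel-access API work:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr)
 *		trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 */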
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003234
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003235__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003236int trace_array_printk_buf(struct ring_buffer *buffer,
3237 unsigned long ip, const char *fmt, ...)
3238{
3239 int ret;
3240 va_list ap;
3241
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003242 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003243 return 0;
3244
3245 va_start(ap, fmt);
3246 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3247 va_end(ap);
3248 return ret;
3249}
3250
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003251__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003252int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3253{
Steven Rostedta813a152009-10-09 01:41:35 -04003254 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003255}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003256EXPORT_SYMBOL_GPL(trace_vprintk);
3257
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003258static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003259{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003260 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3261
Steven Rostedt5a90f572008-09-03 17:42:51 -04003262 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003263 if (buf_iter)
3264 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003265}
3266
Ingo Molnare309b412008-05-12 21:20:51 +02003267static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003268peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3269 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003270{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003271 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003272 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003273
Steven Rostedtd7690412008-10-01 00:29:53 -04003274 if (buf_iter)
3275 event = ring_buffer_iter_peek(buf_iter, ts);
3276 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003277 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003278 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003279
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003280 if (event) {
3281 iter->ent_size = ring_buffer_event_length(event);
3282 return ring_buffer_event_data(event);
3283 }
3284 iter->ent_size = 0;
3285 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003286}
Steven Rostedtd7690412008-10-01 00:29:53 -04003287
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003288static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003289__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3290 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003291{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003292 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003293 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003294 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003295 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003296 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003297 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003298 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003299 int cpu;
3300
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003301 /*
3302	 * If we are in a per_cpu trace file, don't bother iterating over
3303	 * all CPUs; peek at that one directly.
3304 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003305 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003306 if (ring_buffer_empty_cpu(buffer, cpu_file))
3307 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003308 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003309 if (ent_cpu)
3310 *ent_cpu = cpu_file;
3311
3312 return ent;
3313 }
3314
Steven Rostedtab464282008-05-12 21:21:00 +02003315 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003316
3317 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003318 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003319
Steven Rostedtbc21b472010-03-31 19:49:26 -04003320 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003321
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003322 /*
3323 * Pick the entry with the smallest timestamp:
3324 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003325 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003326 next = ent;
3327 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003328 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003329 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003330 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003331 }
3332 }
3333
Steven Rostedt12b5da32012-03-27 10:43:28 -04003334 iter->ent_size = next_size;
3335
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003336 if (ent_cpu)
3337 *ent_cpu = next_cpu;
3338
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003339 if (ent_ts)
3340 *ent_ts = next_ts;
3341
Steven Rostedtbc21b472010-03-31 19:49:26 -04003342 if (missing_events)
3343 *missing_events = next_lost;
3344
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003345 return next;
3346}
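
/*
 * Editor's example of the selection above: if the next pending events
 * in three per-CPU buffers carry the timestamps
 *
 *	cpu0: 1003	cpu1: 1001	cpu2: 1002
 *
 * successive calls return the cpu1 event, then cpu2, then cpu0. Each
 * per-CPU buffer is only locally ordered; this linear scan over all
 * CPUs is what produces a globally time-ordered merged stream.
 */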
3347
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003348/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003349struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3350 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003351{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003352 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003353}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003354
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003355/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003356void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003357{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003358 iter->ent = __find_next_entry(iter, &iter->cpu,
3359 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003360
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003361 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003362 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003363
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003364 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003365}
3366
Ingo Molnare309b412008-05-12 21:20:51 +02003367static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003368{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003369 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003370 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003371}
3372
Ingo Molnare309b412008-05-12 21:20:51 +02003373static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003374{
3375 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003376 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003377 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003378
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003379 WARN_ON_ONCE(iter->leftover);
3380
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003381 (*pos)++;
3382
3383 /* can't go backwards */
3384 if (iter->idx > i)
3385 return NULL;
3386
3387 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003388 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003389 else
3390 ent = iter;
3391
3392 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003393 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003394
3395 iter->pos = *pos;
3396
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003397 return ent;
3398}
3399
Jason Wessel955b61e2010-08-05 09:22:23 -05003400void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003401{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003402 struct ring_buffer_event *event;
3403 struct ring_buffer_iter *buf_iter;
3404 unsigned long entries = 0;
3405 u64 ts;
3406
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003408
Steven Rostedt6d158a82012-06-27 20:46:14 -04003409 buf_iter = trace_buffer_iter(iter, cpu);
3410 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003411 return;
3412
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003413 ring_buffer_iter_reset(buf_iter);
3414
3415 /*
3416	 * With the max latency tracers, a reset may never have taken
3417	 * place on a cpu. This is evident when the timestamp is before
3418	 * the start of the buffer.
3419 */
3420 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003421 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003422 break;
3423 entries++;
3424 ring_buffer_read(buf_iter, NULL);
3425 }
3426
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003428}
3429
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003430/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003431 * The current tracer is copied to avoid taking a global lock
3432 * all around.
3433 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003434static void *s_start(struct seq_file *m, loff_t *pos)
3435{
3436 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003437 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003438 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439 void *p = NULL;
3440 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003441 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003443 /*
3444	 * Copy the tracer to avoid using a global lock all around.
3445	 * iter->trace is a copy of current_trace; the name pointers can
3446	 * be compared directly instead of using strcmp(), as iter->trace->name
3447	 * will point to the same string as current_trace->name.
3448 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003449 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003450 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3451 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003452 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003454#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003455 if (iter->snapshot && iter->trace->use_max_tr)
3456 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003457#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003458
3459 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003460 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003461
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462 if (*pos != iter->pos) {
3463 iter->ent = NULL;
3464 iter->cpu = 0;
3465 iter->idx = -1;
3466
Steven Rostedtae3b5092013-01-23 15:22:59 -05003467 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003468 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003469 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003470 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003471 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003472
Lai Jiangshanac91d852010-03-02 17:54:50 +08003473 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003474 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3475 ;
3476
3477 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003478 /*
3479 * If we overflowed the seq_file before, then we want
3480 * to just reuse the trace_seq buffer again.
3481 */
3482 if (iter->leftover)
3483 p = iter;
3484 else {
3485 l = *pos - 1;
3486 p = s_next(m, p, &l);
3487 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003488 }
3489
Lai Jiangshan4f535962009-05-18 19:35:34 +08003490 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003491 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003492 return p;
3493}
3494
3495static void s_stop(struct seq_file *m, void *p)
3496{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003497 struct trace_iterator *iter = m->private;
3498
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003499#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003500 if (iter->snapshot && iter->trace->use_max_tr)
3501 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003502#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003503
3504 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003505 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003506
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003507 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003508 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003509}
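
/*
 * Editor's note: s_start()/s_next()/s_stop(), together with s_show(),
 * implement the standard seq_file iterator contract and are hooked up
 * later in this file, roughly as:
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start		= s_start,
 *		.next		= s_next,
 *		.stop		= s_stop,
 *		.show		= s_show,
 *	};
 *
 * seq_file guarantees that s_stop() is called for every successful
 * s_start(), which is why the locks taken at the end of s_start() can
 * safely be released here.
 */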
3510
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003511static void
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003512get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3513 unsigned long *entries, int cpu)
3514{
3515 unsigned long count;
3516
3517 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3518 /*
3519	 * If this buffer has skipped entries, then we still hold all
3520	 * entries for the trace, and we need to ignore the
3521	 * ones before the timestamp.
3522 */
3523 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3524 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3525 /* total is the same as the entries */
3526 *total = count;
3527 } else
3528 *total = count +
3529 ring_buffer_overrun_cpu(buf->buffer, cpu);
3530 *entries = count;
3531}
3532
3533static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003534get_total_entries(struct trace_buffer *buf,
3535 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003536{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003537 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003538 int cpu;
3539
3540 *total = 0;
3541 *entries = 0;
3542
3543 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003544 get_total_entries_cpu(buf, &t, &e, cpu);
3545 *total += t;
3546 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003547 }
3548}
3549
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003550unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3551{
3552 unsigned long total, entries;
3553
3554 if (!tr)
3555 tr = &global_trace;
3556
3557 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3558
3559 return entries;
3560}
3561
3562unsigned long trace_total_entries(struct trace_array *tr)
3563{
3564 unsigned long total, entries;
3565
3566 if (!tr)
3567 tr = &global_trace;
3568
3569 get_total_entries(&tr->trace_buffer, &total, &entries);
3570
3571 return entries;
3572}
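
/*
 * Example (editor's note): both helpers accept a NULL trace_array and
 * then report on the global trace buffer:
 *
 *	unsigned long n = trace_total_entries(NULL);
 *
 * The value returned counts only entries still present in the buffer;
 * entries that were already overwritten (the overrun) show up only in
 * the "total" computed by get_total_entries_cpu().
 */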
3573
Ingo Molnare309b412008-05-12 21:20:51 +02003574static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003575{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003576 seq_puts(m, "# _------=> CPU# \n"
3577 "# / _-----=> irqs-off \n"
3578 "# | / _----=> need-resched \n"
3579 "# || / _---=> hardirq/softirq \n"
3580 "# ||| / _--=> preempt-depth \n"
3581 "# |||| / delay \n"
3582 "# cmd pid ||||| time | caller \n"
3583 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003584}
3585
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003586static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003587{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003588 unsigned long total;
3589 unsigned long entries;
3590
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003591 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003592 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3593 entries, total, num_online_cpus());
3594 seq_puts(m, "#\n");
3595}
3596
Joel Fernandes441dae82017-06-25 22:38:43 -07003597static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3598 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003599{
Joel Fernandes441dae82017-06-25 22:38:43 -07003600 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3601
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003602 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003603
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003604 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3605 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003606}
3607
Joel Fernandes441dae82017-06-25 22:38:43 -07003608static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3609 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003610{
Joel Fernandes441dae82017-06-25 22:38:43 -07003611 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003612 const char *space = " ";
3613 int prec = tgid ? 10 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003614
Quentin Perret9e738212019-02-14 15:29:50 +00003615 print_event_info(buf, m);
3616
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003617 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3618 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3619 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3620 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3621 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3622 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3623 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003624}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003625
Jiri Olsa62b915f2010-04-02 19:01:22 +02003626void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003627print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3628{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003629 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003630 struct trace_buffer *buf = iter->trace_buffer;
3631 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003632 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003633 unsigned long entries;
3634 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003635 const char *name = "preemption";
3636
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003637 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003638
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003639 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003640
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003641 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003642 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003643 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003644 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003645 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003646 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003647 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003648 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003649 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003650 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003651#if defined(CONFIG_PREEMPT_NONE)
3652 "server",
3653#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3654 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003655#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003656 "preempt",
3657#else
3658 "unknown",
3659#endif
3660 /* These are reserved for later use */
3661 0, 0, 0, 0);
3662#ifdef CONFIG_SMP
3663 seq_printf(m, " #P:%d)\n", num_online_cpus());
3664#else
3665 seq_puts(m, ")\n");
3666#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003667 seq_puts(m, "# -----------------\n");
3668 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003669 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003670 data->comm, data->pid,
3671 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003672 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003673 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003674
3675 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003676 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003677 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3678 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003679 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003680 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3681 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003682 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003683 }
3684
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003685 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003686}
3687
Steven Rostedta3097202008-11-07 22:36:02 -05003688static void test_cpu_buff_start(struct trace_iterator *iter)
3689{
3690 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003691 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003692
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003693 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003694 return;
3695
3696 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3697 return;
3698
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003699 if (cpumask_available(iter->started) &&
3700 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003701 return;
3702
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003703 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003704 return;
3705
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003706 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003707 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003708
3709 /* Don't print started cpu buffer for the first entry of the trace */
3710 if (iter->idx > 1)
3711 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3712 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003713}
3714
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003715static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003716{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003717 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003718 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003719 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003720 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003721 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003722
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003723 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003724
Steven Rostedta3097202008-11-07 22:36:02 -05003725 test_cpu_buff_start(iter);
3726
Steven Rostedtf633cef2008-12-23 23:24:13 -05003727 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003728
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003729 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003730 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3731 trace_print_lat_context(iter);
3732 else
3733 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003734 }
3735
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003736 if (trace_seq_has_overflowed(s))
3737 return TRACE_TYPE_PARTIAL_LINE;
3738
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003739 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003740 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003741
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003742 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003743
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003744 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003745}
3746
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003747static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003748{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003749 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003750 struct trace_seq *s = &iter->seq;
3751 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003752 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003753
3754 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003755
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003756 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003757 trace_seq_printf(s, "%d %d %llu ",
3758 entry->pid, iter->cpu, iter->ts);
3759
3760 if (trace_seq_has_overflowed(s))
3761 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003762
Steven Rostedtf633cef2008-12-23 23:24:13 -05003763 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003764 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003765 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003766
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003767 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003768
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003769 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003770}
3771
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003772static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003773{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003774 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003775 struct trace_seq *s = &iter->seq;
3776 unsigned char newline = '\n';
3777 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003778 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003779
3780 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003781
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003782 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003783 SEQ_PUT_HEX_FIELD(s, entry->pid);
3784 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3785 SEQ_PUT_HEX_FIELD(s, iter->ts);
3786 if (trace_seq_has_overflowed(s))
3787 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003788 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003789
Steven Rostedtf633cef2008-12-23 23:24:13 -05003790 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003791 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003792 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003793 if (ret != TRACE_TYPE_HANDLED)
3794 return ret;
3795 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003796
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003797 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003798
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003799 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003800}
3801
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003802static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003803{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003804 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003805 struct trace_seq *s = &iter->seq;
3806 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003807 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003808
3809 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003810
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003811 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003812 SEQ_PUT_FIELD(s, entry->pid);
3813 SEQ_PUT_FIELD(s, iter->cpu);
3814 SEQ_PUT_FIELD(s, iter->ts);
3815 if (trace_seq_has_overflowed(s))
3816 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003817 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003818
Steven Rostedtf633cef2008-12-23 23:24:13 -05003819 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003820 return event ? event->funcs->binary(iter, 0, event) :
3821 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003822}
3823
Jiri Olsa62b915f2010-04-02 19:01:22 +02003824int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003825{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003826 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003827 int cpu;
3828
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003829 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003830 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003831 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003832 buf_iter = trace_buffer_iter(iter, cpu);
3833 if (buf_iter) {
3834 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003835 return 0;
3836 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003837 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003838 return 0;
3839 }
3840 return 1;
3841 }
3842
Steven Rostedtab464282008-05-12 21:21:00 +02003843 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003844 buf_iter = trace_buffer_iter(iter, cpu);
3845 if (buf_iter) {
3846 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003847 return 0;
3848 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003849 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003850 return 0;
3851 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003852 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003853
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003854 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003855}
3856
Lai Jiangshan4f535962009-05-18 19:35:34 +08003857/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003858enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003859{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003860 struct trace_array *tr = iter->tr;
3861 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003862 enum print_line_t ret;
3863
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003864 if (iter->lost_events) {
3865 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3866 iter->cpu, iter->lost_events);
3867 if (trace_seq_has_overflowed(&iter->seq))
3868 return TRACE_TYPE_PARTIAL_LINE;
3869 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003870
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003871 if (iter->trace && iter->trace->print_line) {
3872 ret = iter->trace->print_line(iter);
3873 if (ret != TRACE_TYPE_UNHANDLED)
3874 return ret;
3875 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003876
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003877 if (iter->ent->type == TRACE_BPUTS &&
3878 trace_flags & TRACE_ITER_PRINTK &&
3879 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3880 return trace_print_bputs_msg_only(iter);
3881
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003882 if (iter->ent->type == TRACE_BPRINT &&
3883 trace_flags & TRACE_ITER_PRINTK &&
3884 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003885 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003886
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003887 if (iter->ent->type == TRACE_PRINT &&
3888 trace_flags & TRACE_ITER_PRINTK &&
3889 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003890 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003891
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003892 if (trace_flags & TRACE_ITER_BIN)
3893 return print_bin_fmt(iter);
3894
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003895 if (trace_flags & TRACE_ITER_HEX)
3896 return print_hex_fmt(iter);
3897
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003898 if (trace_flags & TRACE_ITER_RAW)
3899 return print_raw_fmt(iter);
3900
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003901 return print_trace_fmt(iter);
3902}
3903
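/*
 * Illustrative usage note (not from the original source): the checks above
 * give the "bin", "hex" and "raw" options priority over the default
 * formatter, so the output format of the trace file can be switched from a
 * shell, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *   # echo hex > trace_options     entries now go through print_hex_fmt()
 *   # echo nohex > trace_options   back to print_trace_fmt()
 */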
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003904void trace_latency_header(struct seq_file *m)
3905{
3906 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003907 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003908
3909 /* print nothing if the buffers are empty */
3910 if (trace_empty(iter))
3911 return;
3912
3913 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3914 print_trace_header(m, iter);
3915
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003916 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003917 print_lat_help_header(m);
3918}
3919
Jiri Olsa62b915f2010-04-02 19:01:22 +02003920void trace_default_header(struct seq_file *m)
3921{
3922 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003923 struct trace_array *tr = iter->tr;
3924 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003925
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003926 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3927 return;
3928
Jiri Olsa62b915f2010-04-02 19:01:22 +02003929 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3930 /* print nothing if the buffers are empty */
3931 if (trace_empty(iter))
3932 return;
3933 print_trace_header(m, iter);
3934 if (!(trace_flags & TRACE_ITER_VERBOSE))
3935 print_lat_help_header(m);
3936 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003937 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3938 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003939 print_func_help_header_irq(iter->trace_buffer,
3940 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003941 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003942 print_func_help_header(iter->trace_buffer, m,
3943 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003944 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003945 }
3946}
3947
Steven Rostedte0a413f2011-09-29 21:26:16 -04003948static void test_ftrace_alive(struct seq_file *m)
3949{
3950 if (!ftrace_is_dead())
3951 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003952 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3953 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003954}
3955
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003956#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003957static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003958{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003959 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3960 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3961 "# Takes a snapshot of the main buffer.\n"
3962 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3963 "# (Doesn't have to be '2' works with any number that\n"
3964 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003965}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003966
3967static void show_snapshot_percpu_help(struct seq_file *m)
3968{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003969 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003970#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003971 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3972 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003973#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003974 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3975 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003976#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003977 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3978 "# (Doesn't have to be '2' works with any number that\n"
3979 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003980}
3981
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003982static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3983{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003984 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003985 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003986 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003987 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003988
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003989 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003990 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3991 show_snapshot_main_help(m);
3992 else
3993 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003994}
3995#else
3996/* Should never be called */
3997static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3998#endif
3999
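/*
 * Illustrative shell session for the help text above (assuming tracefs
 * mounted at /sys/kernel/tracing):
 *
 *   # echo 1 > snapshot    allocate and take a snapshot of the main buffer
 *   # cat snapshot         read the frozen copy while tracing continues
 *   # echo 0 > snapshot    free the snapshot buffer again
 */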
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004000static int s_show(struct seq_file *m, void *v)
4001{
4002 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004003 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004004
4005 if (iter->ent == NULL) {
4006 if (iter->tr) {
4007 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4008 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004009 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004010 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004011 if (iter->snapshot && trace_empty(iter))
4012 print_snapshot_help(m, iter);
4013 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004014 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004015 else
4016 trace_default_header(m);
4017
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004018 } else if (iter->leftover) {
4019 /*
4020 * If we filled the seq_file buffer earlier, we
4021 * want to just show it now.
4022 */
4023 ret = trace_print_seq(m, &iter->seq);
4024
4025 /* ret should this time be zero, but you never know */
4026 iter->leftover = ret;
4027
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004028 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004029 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004030 ret = trace_print_seq(m, &iter->seq);
4031 /*
4032 * If we overflow the seq_file buffer, then it will
4033 * ask us for this data again at start up.
4034 * Use that instead.
4035 * ret is 0 if seq_file write succeeded.
4036 * -1 otherwise.
4037 */
4038 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004039 }
4040
4041 return 0;
4042}
4043
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004044/*
 4045 * Should be used after trace_array_get(); trace_types_lock
 4046 * ensures that i_cdev was already initialized.
4047 */
4048static inline int tracing_get_cpu(struct inode *inode)
4049{
4050 if (inode->i_cdev) /* See trace_create_cpu_file() */
4051 return (long)inode->i_cdev - 1;
4052 return RING_BUFFER_ALL_CPUS;
4053}
4054
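/*
 * Sketch of the encoding tracing_get_cpu() undoes (see
 * trace_create_cpu_file() elsewhere in this file): the per-cpu files bias
 * the cpu number by one so that a NULL i_cdev can stand for "all CPUs",
 * roughly:
 *
 *   d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */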
James Morris88e9d342009-09-22 16:43:43 -07004055static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004056 .start = s_start,
4057 .next = s_next,
4058 .stop = s_stop,
4059 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004060};
4061
Ingo Molnare309b412008-05-12 21:20:51 +02004062static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004063__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004064{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004065 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004066 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004067 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004068
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004069 if (tracing_disabled)
4070 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004071
Jiri Olsa50e18b92012-04-25 10:23:39 +02004072 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004073 if (!iter)
4074 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004075
Gil Fruchter72917232015-06-09 10:32:35 +03004076 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004077 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004078 if (!iter->buffer_iter)
4079 goto release;
4080
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004081 /*
4082 * We make a copy of the current tracer to avoid concurrent
4083 * changes on it while we are reading.
4084 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004085 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004086 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004087 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004088 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004089
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004090 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004091
Li Zefan79f55992009-06-15 14:58:26 +08004092 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004093 goto fail;
4094
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004095 iter->tr = tr;
4096
4097#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004098 /* Currently only the top directory has a snapshot */
4099 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004100 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004101 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004102#endif
4103 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004104 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004105 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004106 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004107 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004108
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004109 /* Notify the tracer early, before we stop tracing. */
4110 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004111 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004112
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004113 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004114 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004115 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4116
David Sharp8be07092012-11-13 12:18:22 -08004117 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004118 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004119 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4120
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004121 /* stop the trace while dumping if we are not opening "snapshot" */
4122 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004123 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004124
Steven Rostedtae3b5092013-01-23 15:22:59 -05004125 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004126 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004127 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004128 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4129 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004130 }
4131 ring_buffer_read_prepare_sync();
4132 for_each_tracing_cpu(cpu) {
4133 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004134 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004135 }
4136 } else {
4137 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004138 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004139 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4140 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004141 ring_buffer_read_prepare_sync();
4142 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004143 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004144 }
4145
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004146 mutex_unlock(&trace_types_lock);
4147
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004148 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004149
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004150 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004151 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004152 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004153 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004154release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004155 seq_release_private(inode, file);
4156 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004157}
4158
4159int tracing_open_generic(struct inode *inode, struct file *filp)
4160{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004161 int ret;
4162
4163 ret = tracing_check_open_get_tr(NULL);
4164 if (ret)
4165 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004166
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004167 filp->private_data = inode->i_private;
4168 return 0;
4169}
4170
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004171bool tracing_is_disabled(void)
4172{
 4173 return tracing_disabled;
4174}
4175
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004176/*
4177 * Open and update trace_array ref count.
4178 * Must have the current trace_array passed to it.
4179 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004180int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004181{
4182 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004183 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004184
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004185 ret = tracing_check_open_get_tr(tr);
4186 if (ret)
4187 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004188
4189 filp->private_data = inode->i_private;
4190
4191 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004192}
4193
Hannes Eder4fd27352009-02-10 19:44:12 +01004194static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004195{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004196 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004197 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004198 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004199 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004200
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004201 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004202 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004203 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004204 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004205
Oleg Nesterov6484c712013-07-23 17:26:10 +02004206 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004207 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004208 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004209
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004210 for_each_tracing_cpu(cpu) {
4211 if (iter->buffer_iter[cpu])
4212 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4213 }
4214
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004215 if (iter->trace && iter->trace->close)
4216 iter->trace->close(iter);
4217
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004218 if (!iter->snapshot)
4219 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004220 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004221
4222 __trace_array_put(tr);
4223
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004224 mutex_unlock(&trace_types_lock);
4225
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004226 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004227 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004228 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004229 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004230 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004231
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232 return 0;
4233}
4234
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004235static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4236{
4237 struct trace_array *tr = inode->i_private;
4238
4239 trace_array_put(tr);
4240 return 0;
4241}
4242
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004243static int tracing_single_release_tr(struct inode *inode, struct file *file)
4244{
4245 struct trace_array *tr = inode->i_private;
4246
4247 trace_array_put(tr);
4248
4249 return single_release(inode, file);
4250}
4251
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004252static int tracing_open(struct inode *inode, struct file *file)
4253{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004254 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004255 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004256 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004257
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004258 ret = tracing_check_open_get_tr(tr);
4259 if (ret)
4260 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004261
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004262 /* If this file was opened for write, then erase its contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004263 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4264 int cpu = tracing_get_cpu(inode);
Bo Yan8dd33bc2017-09-18 10:03:35 -07004265 struct trace_buffer *trace_buf = &tr->trace_buffer;
4266
4267#ifdef CONFIG_TRACER_MAX_TRACE
4268 if (tr->current_trace->print_max)
4269 trace_buf = &tr->max_buffer;
4270#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004271
4272 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004273 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004274 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004275 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004276 }
4277
4278 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004279 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004280 if (IS_ERR(iter))
4281 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004282 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004283 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4284 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004285
4286 if (ret < 0)
4287 trace_array_put(tr);
4288
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004289 return ret;
4290}
4291
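/*
 * Illustrative consequence of the O_TRUNC test above: a plain
 *
 *   # echo > trace
 *
 * opens the file write-only with truncation and therefore clears the ring
 * buffer (all CPUs here, or a single one through per_cpu/cpu<N>/trace).
 */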
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004292/*
4293 * Some tracers are not suitable for instance buffers.
4294 * A tracer is always available for the global array (toplevel)
4295 * or if it explicitly states that it is.
4296 */
4297static bool
4298trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4299{
4300 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4301}
4302
4303/* Find the next tracer that this trace array may use */
4304static struct tracer *
4305get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4306{
4307 while (t && !trace_ok_for_array(t, tr))
4308 t = t->next;
4309
4310 return t;
4311}
4312
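/*
 * Note (illustrative): only tracers that set ->allow_instances are offered
 * inside instance directories (the function tracer does, for example);
 * every registered tracer remains available in the toplevel directory.
 */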
Ingo Molnare309b412008-05-12 21:20:51 +02004313static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004314t_next(struct seq_file *m, void *v, loff_t *pos)
4315{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004316 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004317 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004318
4319 (*pos)++;
4320
4321 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004322 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004323
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004324 return t;
4325}
4326
4327static void *t_start(struct seq_file *m, loff_t *pos)
4328{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004329 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004330 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004331 loff_t l = 0;
4332
4333 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004334
4335 t = get_tracer_for_array(tr, trace_types);
4336 for (; t && l < *pos; t = t_next(m, t, &l))
4337 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004338
4339 return t;
4340}
4341
4342static void t_stop(struct seq_file *m, void *p)
4343{
4344 mutex_unlock(&trace_types_lock);
4345}
4346
4347static int t_show(struct seq_file *m, void *v)
4348{
4349 struct tracer *t = v;
4350
4351 if (!t)
4352 return 0;
4353
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004354 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004355 if (t->next)
4356 seq_putc(m, ' ');
4357 else
4358 seq_putc(m, '\n');
4359
4360 return 0;
4361}
4362
James Morris88e9d342009-09-22 16:43:43 -07004363static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004364 .start = t_start,
4365 .next = t_next,
4366 .stop = t_stop,
4367 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004368};
4369
4370static int show_traces_open(struct inode *inode, struct file *file)
4371{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004372 struct trace_array *tr = inode->i_private;
4373 struct seq_file *m;
4374 int ret;
4375
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004376 ret = tracing_check_open_get_tr(tr);
4377 if (ret)
4378 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004379
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004380 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004381 if (ret) {
4382 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004383 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004384 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004385
4386 m = file->private_data;
4387 m->private = tr;
4388
4389 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004390}
4391
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004392static int show_traces_release(struct inode *inode, struct file *file)
4393{
4394 struct trace_array *tr = inode->i_private;
4395
4396 trace_array_put(tr);
4397 return seq_release(inode, file);
4398}
4399
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004400static ssize_t
4401tracing_write_stub(struct file *filp, const char __user *ubuf,
4402 size_t count, loff_t *ppos)
4403{
4404 return count;
4405}
4406
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004407loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004408{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004409 int ret;
4410
Slava Pestov364829b2010-11-24 15:13:16 -08004411 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004412 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004413 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004414 file->f_pos = ret = 0;
4415
4416 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004417}
4418
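/*
 * Illustrative: a write-only open (e.g. "echo > trace") has no seq_file
 * attached, so seeking such a descriptor pins the position at 0 rather
 * than handing a NULL private pointer to seq_lseek().
 */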
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004419static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004420 .open = tracing_open,
4421 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004422 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004423 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004424 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004425};
4426
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004427static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004428 .open = show_traces_open,
4429 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004430 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004431 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004432};
4433
4434static ssize_t
4435tracing_cpumask_read(struct file *filp, char __user *ubuf,
4436 size_t count, loff_t *ppos)
4437{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004438 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004439 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004440 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004441
Changbin Du90e406f2017-11-30 11:39:43 +08004442 len = snprintf(NULL, 0, "%*pb\n",
4443 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4444 mask_str = kmalloc(len, GFP_KERNEL);
4445 if (!mask_str)
4446 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004447
Changbin Du90e406f2017-11-30 11:39:43 +08004448 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004449 cpumask_pr_args(tr->tracing_cpumask));
4450 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004451 count = -EINVAL;
4452 goto out_err;
4453 }
Changbin Du90e406f2017-11-30 11:39:43 +08004454 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004455
4456out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004457 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004458
4459 return count;
4460}
4461
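/*
 * Example output (illustrative): on an 8-CPU box with all CPUs traced,
 *
 *   # cat tracing_cpumask
 *   ff
 *
 * "%*pb" renders the cpumask in the standard bitmap format.
 */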
4462static ssize_t
4463tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4464 size_t count, loff_t *ppos)
4465{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004466 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304467 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004468 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304469
4470 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4471 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004472
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304473 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004474 if (err)
4475 goto err_unlock;
4476
Steven Rostedta5e25882008-12-02 15:34:05 -05004477 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004478 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004479 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004480 /*
4481 * Increase/decrease the disabled counter if we are
4482 * about to flip a bit in the cpumask:
4483 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004484 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304485 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004486 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4487 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004488 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004489 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304490 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004491 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4492 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004493 }
4494 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004495 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004496 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004497
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004498 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304499 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004500
Ingo Molnarc7078de2008-05-12 21:20:52 +02004501 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004502
4503err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004504 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004505
4506 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004507}
4508
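/*
 * Example (illustrative): restrict tracing to CPUs 0 and 1 with
 *
 *   # echo 3 > tracing_cpumask
 *
 * cpumask_parse_user() accepts the same bitmap format the read side
 * prints, and the loop above quiesces the per-cpu buffers that drop out
 * of the mask without discarding their contents.
 */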
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004509static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004510 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004511 .read = tracing_cpumask_read,
4512 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004513 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004514 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004515};
4516
Li Zefanfdb372e2009-12-08 11:15:59 +08004517static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004518{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004519 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004520 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004521 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004522 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004523
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004524 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004525 tracer_flags = tr->current_trace->flags->val;
4526 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004527
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004528 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004529 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004530 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004531 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004532 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004533 }
4534
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004535 for (i = 0; trace_opts[i].name; i++) {
4536 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004537 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004538 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004539 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004540 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004541 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004542
Li Zefanfdb372e2009-12-08 11:15:59 +08004543 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004544}
4545
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004546static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004547 struct tracer_flags *tracer_flags,
4548 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004549{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004550 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004551 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004552
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004553 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004554 if (ret)
4555 return ret;
4556
4557 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004558 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004559 else
Zhaolei77708412009-08-07 18:53:21 +08004560 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004561 return 0;
4562}
4563
Li Zefan8d18eaa2009-12-08 11:17:06 +08004564/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004565static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004566{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004567 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004568 struct tracer_flags *tracer_flags = trace->flags;
4569 struct tracer_opt *opts = NULL;
4570 int i;
4571
4572 for (i = 0; tracer_flags->opts[i].name; i++) {
4573 opts = &tracer_flags->opts[i];
4574
4575 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004576 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004577 }
4578
4579 return -EINVAL;
4580}
4581
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004582/* Some tracers require overwrite to stay enabled */
4583int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4584{
4585 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4586 return -1;
4587
4588 return 0;
4589}
4590
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004591int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004592{
4593 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004594 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004595 return 0;
4596
4597 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004598 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004599 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004600 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004601
4602 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004603 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004604 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004605 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004606
4607 if (mask == TRACE_ITER_RECORD_CMD)
4608 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004609
Joel Fernandesd914ba32017-06-26 19:01:55 -07004610 if (mask == TRACE_ITER_RECORD_TGID) {
4611 if (!tgid_map)
Kees Cook6396bb22018-06-12 14:03:40 -07004612 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4613 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004614 GFP_KERNEL);
4615 if (!tgid_map) {
4616 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4617 return -ENOMEM;
4618 }
4619
4620 trace_event_enable_tgid_record(enabled);
4621 }
4622
Steven Rostedtc37775d2016-04-13 16:59:18 -04004623 if (mask == TRACE_ITER_EVENT_FORK)
4624 trace_event_follow_fork(tr, enabled);
4625
Namhyung Kim1e104862017-04-17 11:44:28 +09004626 if (mask == TRACE_ITER_FUNC_FORK)
4627 ftrace_pid_follow_fork(tr, enabled);
4628
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004629 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004630 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004631#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004632 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004633#endif
4634 }
Steven Rostedt81698832012-10-11 10:15:05 -04004635
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004636 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004637 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004638 trace_printk_control(enabled);
4639 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004640
4641 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004642}
4643
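/*
 * Minimal usage sketch (hypothetical caller): each flag is a single bit
 * of tr->trace_flags, so forcing overwrite mode on would look like:
 *
 *   if (set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1) < 0)
 *           return -EINVAL;
 *
 * A return of 0 also covers the case where the flag already had the
 * requested value.
 */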
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004644static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004645{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004646 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004647 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004648 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004649 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004650 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004651
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004652 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004653
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004654 len = str_has_prefix(cmp, "no");
4655 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004656 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004657
4658 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004659
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004660 mutex_lock(&trace_types_lock);
4661
Yisheng Xie591a0332018-05-17 16:36:03 +08004662 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004663 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004664 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004665 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004666 else
4667 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004668
4669 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004670
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004671 /*
4672 * If the first trailing whitespace is replaced with '\0' by strstrip,
4673 * turn it back into a space.
4674 */
4675 if (orig_len > strlen(option))
4676 option[strlen(option)] = ' ';
4677
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004678 return ret;
4679}
4680
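/*
 * Example (illustrative): "noprint-parent" splits into the "no" prefix
 * plus the option name, so
 *
 *   # echo noprint-parent > trace_options
 *
 * clears TRACE_ITER_PRINT_PARENT via set_tracer_flag(), while a name not
 * found in trace_options[] falls through to set_tracer_option() for
 * tracer-specific flags.
 */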
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004681static void __init apply_trace_boot_options(void)
4682{
4683 char *buf = trace_boot_options_buf;
4684 char *option;
4685
4686 while (true) {
4687 option = strsep(&buf, ",");
4688
4689 if (!option)
4690 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004691
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004692 if (*option)
4693 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004694
4695 /* Put back the comma to allow this to be called again */
4696 if (buf)
4697 *(buf - 1) = ',';
4698 }
4699}
4700
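/*
 * Example (illustrative): booting with
 *
 *   trace_options=sym-addr,noprint-parent
 *
 * leaves that string in trace_boot_options_buf, and the strsep() loop
 * above applies each comma-separated option to global_trace.
 */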
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004701static ssize_t
4702tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4703 size_t cnt, loff_t *ppos)
4704{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004705 struct seq_file *m = filp->private_data;
4706 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004707 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004708 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004709
4710 if (cnt >= sizeof(buf))
4711 return -EINVAL;
4712
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004713 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004714 return -EFAULT;
4715
Steven Rostedta8dd2172013-01-09 20:54:17 -05004716 buf[cnt] = 0;
4717
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004718 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004719 if (ret < 0)
4720 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004721
Jiri Olsacf8517c2009-10-23 19:36:16 -04004722 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004723
4724 return cnt;
4725}
4726
Li Zefanfdb372e2009-12-08 11:15:59 +08004727static int tracing_trace_options_open(struct inode *inode, struct file *file)
4728{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004729 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004730 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004731
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004732 ret = tracing_check_open_get_tr(tr);
4733 if (ret)
4734 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004735
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004736 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4737 if (ret < 0)
4738 trace_array_put(tr);
4739
4740 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004741}
4742
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004743static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004744 .open = tracing_trace_options_open,
4745 .read = seq_read,
4746 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004747 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004748 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004749};
4750
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004751static const char readme_msg[] =
4752 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004753 "# echo 0 > tracing_on : quick way to disable tracing\n"
4754 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4755 " Important files:\n"
4756 " trace\t\t\t- The static contents of the buffer\n"
4757 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4758 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4759 " current_tracer\t- function and latency tracers\n"
4760 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05004761 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004762 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4763 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4764 " trace_clock\t\t-change the clock used to order events\n"
4765 " local: Per cpu clock but may not be synced across CPUs\n"
4766 " global: Synced across CPUs but slows tracing down.\n"
4767 " counter: Not a clock, but just an increment\n"
4768 " uptime: Jiffy counter from time of boot\n"
4769 " perf: Same clock that perf events use\n"
4770#ifdef CONFIG_X86_64
4771 " x86-tsc: TSC cycle counter\n"
4772#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004773 "\n timestamp_mode\t- view the mode used to timestamp events\n"
4774 " delta: Delta difference against a buffer-wide timestamp\n"
4775 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004776 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004777 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004778 " tracing_cpumask\t- Limit which CPUs to trace\n"
4779 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4780 "\t\t\t Remove sub-buffer with rmdir\n"
4781 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08004782 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004783 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004784 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004785#ifdef CONFIG_DYNAMIC_FTRACE
4786 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004787 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4788 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004789 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004790 "\t modules: Can select a group via module\n"
4791 "\t Format: :mod:<module-name>\n"
4792 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4793 "\t triggers: a command to perform when function is hit\n"
4794 "\t Format: <function>:<trigger>[:count]\n"
4795 "\t trigger: traceon, traceoff\n"
4796 "\t\t enable_event:<system>:<event>\n"
4797 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004798#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004799 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004800#endif
4801#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004802 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004803#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004804 "\t\t dump\n"
4805 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004806 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4807 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4808 "\t The first one will disable tracing every time do_fault is hit\n"
4809 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4810 "\t The first time do trap is hit and it disables tracing, the\n"
4811 "\t counter will decrement to 2. If tracing is already disabled,\n"
4812 "\t the counter will not decrement. It only decrements when the\n"
4813 "\t trigger did work\n"
4814 "\t To remove trigger without count:\n"
4815 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4816 "\t To remove trigger with a count:\n"
4817 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004818 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004819 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4820 "\t modules: Can select a group via module command :mod:\n"
4821 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004822#endif /* CONFIG_DYNAMIC_FTRACE */
4823#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004824 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4825 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004826#endif
4827#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4828 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004829 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004830 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4831#endif
4832#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004833 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4834 "\t\t\t snapshot buffer. Read the contents for more\n"
4835 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004836#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004837#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004838 " stack_trace\t\t- Shows the max stack trace when active\n"
4839 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004840 "\t\t\t Write into this file to reset the max size (trigger a\n"
4841 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004842#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004843 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4844 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004845#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004846#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004847#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004848 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004849 "\t\t\t Write into this file to define/undefine new trace events.\n"
4850#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004851#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004852 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004853 "\t\t\t Write into this file to define/undefine new trace events.\n"
4854#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004855#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09004856 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004857 "\t\t\t Write into this file to define/undefine new trace events.\n"
4858#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004859#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004860 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09004861 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4862 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004863#ifdef CONFIG_HIST_TRIGGERS
4864 "\t s:[synthetic/]<event> <field> [<field>]\n"
4865#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004866 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004867#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004868 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304869 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004870#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004871#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05304872 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#else
	"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
	"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
	"\t <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t field: <stype> <name>;\n"
	"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
	"\t [unsigned] char/int/long\n"
#endif
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	" hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t Format: hist:keys=<field1[,field2,...]>\n"
	"\t [:values=<field1[,field2,...]>]\n"
	"\t [:sort=<field1[,field2,...]>]\n"
	"\t [:size=#entries]\n"
	"\t [:pause][:continue][:clear]\n"
	"\t [:name=histname1]\n"
	"\t [:<handler>.<action>]\n"
	"\t [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex display a number as a hex value\n"
	"\t .sym display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname display a common_pid as a program name\n"
	"\t .syscall display a syscall id as a syscall name\n"
	"\t .log2 display log2 value rather than raw number\n"
	"\t .usecs display a common_timestamp in microseconds\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n\n"
4985 "\t Hist trigger handlers and actions are executed whenever a\n"
4986 "\t a histogram entry is added or updated. They take the form:\n\n"
	"\t <handler>.<action>\n\n"
	"\t The available handlers are:\n\n"
	"\t onmatch(matching.event) - invoke on addition or update\n"
	"\t onmax(var) - invoke if var exceeds current max\n"
	"\t onchange(var) - invoke action if var changes\n\n"
	"\t The available actions are:\n\n"
	"\t trace(<synthetic_event>,param list) - generate synthetic event\n"
	"\t save(field,...) - save current event fields\n"
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t snapshot() - snapshot the trace buffer\n"
#endif
#endif
;

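/*
 * The text above is returned verbatim by reads of the 'README' file.
 * Example (sketch, assuming tracefs is mounted at /sys/kernel/tracing):
 *   cat /sys/kernel/tracing/README
 */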
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

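/*
 * seq_file iterator backing the 'saved_tgids' file: walk the
 * pid -> tgid map and emit one "pid tgid" line per recorded entry.
 */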
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}


static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

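/*
 * seq_file iterator backing the 'saved_cmdlines' file: walk
 * savedcmd->map_cmdline_to_pid and emit one "pid comm" line for each
 * slot that has a command line recorded.
 */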
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

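/*
 * Replace the global cmdline buffer with one holding @val entries.
 * The pointer swap is done under trace_cmdline_lock; the old buffer is
 * freed only after the new one is visible.
 */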
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

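/*
 * Example (sketch): record up to 1024 task command lines:
 *   echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *   cat /sys/kernel/tracing/saved_cmdlines
 */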
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
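/*
 * The eval maps are stored in blocks chained through tail items (see
 * trace_insert_eval_map_file() below). The 'eval_map' seq_file walks
 * the chain; update_eval_map() skips the head/tail bookkeeping items.
 */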
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

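/*
 * Example (sketch; the file exists only with CONFIG_TRACE_EVAL_MAP_FILE):
 *   cat /sys/kernel/tracing/eval_map
 * Each line is "<eval string> <value> (<system>)", per eval_map_show().
 */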
static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

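/*
 * The read/write handlers below back the 'current_tracer' file.
 * Example (sketch):
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/current_tracer
 */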
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

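/* Record @val as the per-CPU entry count for every CPU of @buf. */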
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

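/*
 * Resize the trace ring buffer for @cpu (or all CPUs) to @size. If the
 * current tracer uses the max/snapshot buffer, it is resized in
 * lockstep; if that fails, the main buffer is put back to its previous
 * size.
 */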
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers start at a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

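/*
 * Switch @tr to the registered tracer named @buf: expand the ring
 * buffer on first use, tear down the old tracer, and allocate or free
 * the snapshot buffer as the new tracer requires.
 */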
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

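/*
 * Example (sketch): tracing_thresh is written and read in microseconds
 * (stored internally in nanoseconds); 0 disables the threshold:
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 */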
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

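/*
 * Open a 'trace_pipe' reader: each open gets its own iterator, and the
 * current tracer is pinned (ref counted) so it cannot be switched
 * while the pipe is in use.
 */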
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We only give an EOF once something has been read and
		 * tracing has been disabled again. We keep blocking while
		 * tracing is disabled if nothing has been read yet; this
		 * allows a user to cat this file and then enable tracing.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

Steven Rostedtb3806b42008-05-12 21:20:46 +02005990/*
5991 * Consumer reader.
5992 */
5993static ssize_t
5994tracing_read_pipe(struct file *filp, char __user *ubuf,
5995 size_t cnt, loff_t *ppos)
5996{
5997 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005998 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005999
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006000 /*
6001 * Avoid more than one consumer on a single file descriptor
6002 * This is just a matter of traces coherency, the ring buffer itself
6003 * is protected.
6004 */
6005 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006006
6007 /* return any leftover data */
6008 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6009 if (sret != -EBUSY)
6010 goto out;
6011
6012 trace_seq_init(&iter->seq);
6013
Steven Rostedt107bad82008-05-12 21:21:01 +02006014 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006015 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6016 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006017 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006018 }
6019
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006020waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006021 sret = tracing_wait_pipe(filp);
6022 if (sret <= 0)
6023 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006024
6025 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006026 if (trace_empty(iter)) {
6027 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006028 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006029 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006030
6031 if (cnt >= PAGE_SIZE)
6032 cnt = PAGE_SIZE - 1;
6033
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006034 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006035 memset(&iter->seq, 0,
6036 sizeof(struct trace_iterator) -
6037 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006038 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006039 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006040
Lai Jiangshan4f535962009-05-18 19:35:34 +08006041 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006042 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006043 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006044 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006045 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006046
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006047 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006048 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006049 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006050 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006051 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006052 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006053 if (ret != TRACE_TYPE_NO_CONSUME)
6054 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006055
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006056 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006057 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006058
6059 /*
6060 * Setting the full flag means we reached the trace_seq buffer
6061	 * size, so we should have exited via the partial-output condition
6062	 * above. If we get here, one of the trace_seq_* functions was misused.
6063 */
6064 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6065 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006066 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006067 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006068 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006069
Steven Rostedtb3806b42008-05-12 21:20:46 +02006070 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006071 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006072 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006073 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006074
6075 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006076	 * If there was nothing to send to the user, despite having consumed
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006077	 * trace entries, go back and wait for more.
6078 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006079 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006080 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006081
Steven Rostedt107bad82008-05-12 21:21:01 +02006082out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006083 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006084
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006085 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006086}
6087
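/*
 * This read path backs the trace_pipe file. A minimal consumer sketch
 * (the path assumes tracefs mounted at /sys/kernel/tracing; older
 * setups use /sys/kernel/debug/tracing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *
 * Each read() consumes the events it returns, and concurrent readers
 * of one file descriptor are serialized by iter->mutex above.
 */
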
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006088static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6089 unsigned int idx)
6090{
6091 __free_page(spd->pages[idx]);
6092}
6093
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006094static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006095 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006096 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006097 .steal = generic_pipe_buf_steal,
6098 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006099};
6100
Steven Rostedt34cd4992009-02-09 12:06:29 -05006101static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006102tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006103{
6104 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006105 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006106 int ret;
6107
6108 /* Seq buffer is page-sized, exactly what we need. */
6109 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006110 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006111 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006112
6113 if (trace_seq_has_overflowed(&iter->seq)) {
6114 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006115 break;
6116 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006117
6118 /*
6119	 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE should
6120	 * only be returned when iter->seq overflows, which is handled
6121	 * above. But check anyway to be safe.
6122 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006123 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006124 iter->seq.seq.len = save_len;
6125 break;
6126 }
6127
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006128 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006129 if (rem < count) {
6130 rem = 0;
6131 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006132 break;
6133 }
6134
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006135 if (ret != TRACE_TYPE_NO_CONSUME)
6136 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006137 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006138 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006139 rem = 0;
6140 iter->ent = NULL;
6141 break;
6142 }
6143 }
6144
6145 return rem;
6146}
6147
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006148static ssize_t tracing_splice_read_pipe(struct file *filp,
6149 loff_t *ppos,
6150 struct pipe_inode_info *pipe,
6151 size_t len,
6152 unsigned int flags)
6153{
Jens Axboe35f3d142010-05-20 10:43:18 +02006154 struct page *pages_def[PIPE_DEF_BUFFERS];
6155 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006156 struct trace_iterator *iter = filp->private_data;
6157 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006158 .pages = pages_def,
6159 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006160 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006161 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006162 .ops = &tracing_pipe_buf_ops,
6163 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006164 };
6165 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006166 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006167 unsigned int i;
6168
Jens Axboe35f3d142010-05-20 10:43:18 +02006169 if (splice_grow_spd(pipe, &spd))
6170 return -ENOMEM;
6171
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006172 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006173
6174 if (iter->trace->splice_read) {
6175 ret = iter->trace->splice_read(iter, filp,
6176 ppos, pipe, len, flags);
6177 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006178 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006179 }
6180
6181 ret = tracing_wait_pipe(filp);
6182 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006183 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006184
Jason Wessel955b61e2010-08-05 09:22:23 -05006185 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006186 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006187 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006188 }
6189
Lai Jiangshan4f535962009-05-18 19:35:34 +08006190 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006191 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006192
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006193 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006194 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006195 spd.pages[i] = alloc_page(GFP_KERNEL);
6196 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006197 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006198
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006199 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006200
6201 /* Copy the data into the page, so we can start over. */
6202 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006203 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006204 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006205 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006206 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006207 break;
6208 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006209 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006210 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006211
Steven Rostedtf9520752009-03-02 14:04:40 -05006212 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006213 }
6214
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006215 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006216 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006217 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006218
6219 spd.nr_pages = i;
6220
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006221 if (i)
6222 ret = splice_to_pipe(pipe, &spd);
6223 else
6224 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006225out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006226 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006227 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006228
Steven Rostedt34cd4992009-02-09 12:06:29 -05006229out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006230 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006231 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006232}
6233
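/*
 * The splice path above hands formatted pages to a pipe without an
 * extra pass through a userspace buffer. A hedged caller sketch (one
 * end of splice() must be a pipe; log_fd is assumed already open):
 *
 *	int tfd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(tfd, NULL, pfd[1], NULL, 65536, 0);	(fills pipe pages)
 *	splice(pfd[0], NULL, log_fd, NULL, 65536, 0);	(drains to a file)
 */
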
Steven Rostedta98a3c32008-05-12 21:20:59 +02006234static ssize_t
6235tracing_entries_read(struct file *filp, char __user *ubuf,
6236 size_t cnt, loff_t *ppos)
6237{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006238 struct inode *inode = file_inode(filp);
6239 struct trace_array *tr = inode->i_private;
6240 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006241 char buf[64];
6242 int r = 0;
6243 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006244
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006245 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006246
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006247 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006248 int cpu, buf_size_same;
6249 unsigned long size;
6250
6251 size = 0;
6252 buf_size_same = 1;
6253 /* check if all cpu sizes are same */
6254 for_each_tracing_cpu(cpu) {
6255 /* fill in the size from first enabled cpu */
6256 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006257 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6258 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006259 buf_size_same = 0;
6260 break;
6261 }
6262 }
6263
6264 if (buf_size_same) {
6265 if (!ring_buffer_expanded)
6266 r = sprintf(buf, "%lu (expanded: %lu)\n",
6267 size >> 10,
6268 trace_buf_size >> 10);
6269 else
6270 r = sprintf(buf, "%lu\n", size >> 10);
6271 } else
6272 r = sprintf(buf, "X\n");
6273 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006274 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006275
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006276 mutex_unlock(&trace_types_lock);
6277
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006278 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6279 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006280}
6281
6282static ssize_t
6283tracing_entries_write(struct file *filp, const char __user *ubuf,
6284 size_t cnt, loff_t *ppos)
6285{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006286 struct inode *inode = file_inode(filp);
6287 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006288 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006289 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006290
Peter Huewe22fe9b52011-06-07 21:58:27 +02006291 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6292 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006293 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006294
6295 /* must have at least 1 entry */
6296 if (!val)
6297 return -EINVAL;
6298
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006299 /* value is in KB */
6300 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006301 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006302 if (ret < 0)
6303 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006304
Jiri Olsacf8517c2009-10-23 19:36:16 -04006305 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006306
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006307 return cnt;
6308}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006309
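/*
 * The two handlers above back the buffer_size_kb files. Typical shell
 * usage (tracefs mount point assumed):
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	echo 1024 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 *
 * Reading the top-level file prints "X" when the per-CPU sizes differ,
 * and "<size> (expanded: <size>)" while the buffer still has its
 * minimal boot-time allocation.
 */
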
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006310static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006311tracing_total_entries_read(struct file *filp, char __user *ubuf,
6312 size_t cnt, loff_t *ppos)
6313{
6314 struct trace_array *tr = filp->private_data;
6315 char buf[64];
6316 int r, cpu;
6317 unsigned long size = 0, expanded_size = 0;
6318
6319 mutex_lock(&trace_types_lock);
6320 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006321 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006322 if (!ring_buffer_expanded)
6323 expanded_size += trace_buf_size >> 10;
6324 }
6325 if (ring_buffer_expanded)
6326 r = sprintf(buf, "%lu\n", size);
6327 else
6328 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6329 mutex_unlock(&trace_types_lock);
6330
6331 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6332}
6333
6334static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006335tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6336 size_t cnt, loff_t *ppos)
6337{
6338 /*
6339	 * There is no need to read what the user has written; this function
6340	 * only exists so that "echo" into the file does not return an error.
6341 */
6342
6343 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006344
6345 return cnt;
6346}
6347
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006348static int
6349tracing_free_buffer_release(struct inode *inode, struct file *filp)
6350{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006351 struct trace_array *tr = inode->i_private;
6352
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006353	/* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006354 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006355 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006356 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006357 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006358
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006359 trace_array_put(tr);
6360
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006361 return 0;
6362}
6363
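/*
 * free_buffer shrinks the ring buffer to zero when the file is
 * released; the write handler only exists so "echo" succeeds. With
 * the disable_on_free option set (TRACE_ITER_STOP_ON_FREE), tracing
 * is turned off first. Sketch:
 *
 *	echo anything > /sys/kernel/tracing/free_buffer
 *
 * It is the close() after the write that actually frees the memory.
 */
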
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006364static ssize_t
6365tracing_mark_write(struct file *filp, const char __user *ubuf,
6366 size_t cnt, loff_t *fpos)
6367{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006368 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006369 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006370 enum event_trigger_type tt = ETT_NONE;
Steven Rostedtd696b582011-09-22 11:50:27 -04006371 struct ring_buffer *buffer;
6372 struct print_entry *entry;
6373 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006374 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006375 int size;
6376 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006377
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006378/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006379#define FAULTED_STR "<faulted>"
6380#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006381
Steven Rostedtc76f0692008-11-07 22:36:02 -05006382 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006383 return -EINVAL;
6384
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006385 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006386 return -EINVAL;
6387
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006388 if (cnt > TRACE_BUF_SIZE)
6389 cnt = TRACE_BUF_SIZE;
6390
Steven Rostedtd696b582011-09-22 11:50:27 -04006391 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006392
Steven Rostedtd696b582011-09-22 11:50:27 -04006393 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006394 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6395
6396 /* If less than "<faulted>", then make sure we can still add that */
6397 if (cnt < FAULTED_SIZE)
6398 size += FAULTED_SIZE - cnt;
6399
Alexander Z Lam2d716192013-07-01 15:31:24 -07006400 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006401 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6402 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006403 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006404 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006405 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006406
6407 entry = ring_buffer_event_data(event);
6408 entry->ip = _THIS_IP_;
6409
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006410 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6411 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006412 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006413 cnt = FAULTED_SIZE;
6414 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006415 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006416 written = cnt;
6417 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006418
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006419 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6420 /* do not add \n before testing triggers, but add \0 */
6421 entry->buf[cnt] = '\0';
6422 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6423 }
6424
Steven Rostedtd696b582011-09-22 11:50:27 -04006425 if (entry->buf[cnt - 1] != '\n') {
6426 entry->buf[cnt] = '\n';
6427 entry->buf[cnt + 1] = '\0';
6428 } else
6429 entry->buf[cnt] = '\0';
6430
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006431 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006432
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006433 if (tt)
6434 event_triggers_post_call(tr->trace_marker_file, tt);
6435
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006436 if (written > 0)
6437 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006438
Steven Rostedtfa32e852016-07-06 15:25:08 -04006439 return written;
6440}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006441
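/*
 * tracing_mark_write() above backs the trace_marker file, the usual
 * way for userspace to annotate a trace:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "frame start", 11);
 *
 * The write is logged as a TRACE_PRINT event, a '\n' is appended when
 * missing, and any part of the buffer that could not be copied from
 * userspace is replaced by the literal "<faulted>".
 */
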
Steven Rostedtfa32e852016-07-06 15:25:08 -04006442/* Limit it for now to 3K (including tag) */
6443#define RAW_DATA_MAX_SIZE (1024*3)
6444
6445static ssize_t
6446tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6447 size_t cnt, loff_t *fpos)
6448{
6449 struct trace_array *tr = filp->private_data;
6450 struct ring_buffer_event *event;
6451 struct ring_buffer *buffer;
6452 struct raw_data_entry *entry;
6453 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006454 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006455 int size;
6456 int len;
6457
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006458#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6459
Steven Rostedtfa32e852016-07-06 15:25:08 -04006460 if (tracing_disabled)
6461 return -EINVAL;
6462
6463 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6464 return -EINVAL;
6465
6466 /* The marker must at least have a tag id */
6467 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6468 return -EINVAL;
6469
6470 if (cnt > TRACE_BUF_SIZE)
6471 cnt = TRACE_BUF_SIZE;
6472
6473 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6474
Steven Rostedtfa32e852016-07-06 15:25:08 -04006475 local_save_flags(irq_flags);
6476 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006477 if (cnt < FAULT_SIZE_ID)
6478 size += FAULT_SIZE_ID - cnt;
6479
Steven Rostedtfa32e852016-07-06 15:25:08 -04006480 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006481 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6482 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006483 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006484 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006485 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006486
6487 entry = ring_buffer_event_data(event);
6488
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006489 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6490 if (len) {
6491 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006492 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006493 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006494 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006495 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006496
6497 __buffer_unlock_commit(buffer, event);
6498
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006499 if (written > 0)
6500 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006501
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006502 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006503}
6504
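/*
 * trace_marker_raw takes binary records that begin with a 4-byte tag
 * id, which offline tools use to tell the payloads apart. A hedged
 * sketch (fd assumed open on trace_marker_raw; the layout mirrors
 * what the handler copies into a raw_data_entry):
 *
 *	struct {
 *		unsigned int id;
 *		char payload[8];
 *	} rec = { .id = 42, .payload = "raw-data" };
 *
 *	write(fd, &rec, sizeof(rec));
 *
 * Writes shorter than sizeof(unsigned int) or larger than
 * RAW_DATA_MAX_SIZE are rejected with -EINVAL above.
 */
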
Li Zefan13f16d22009-12-08 11:16:11 +08006505static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006506{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006507 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006508 int i;
6509
6510 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006511 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006512 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006513 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6514 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006515 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006516
Li Zefan13f16d22009-12-08 11:16:11 +08006517 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006518}
6519
Tom Zanussid71bd342018-01-15 20:52:07 -06006520int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006521{
Zhaolei5079f322009-08-25 16:12:56 +08006522 int i;
6523
Zhaolei5079f322009-08-25 16:12:56 +08006524 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6525 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6526 break;
6527 }
6528 if (i == ARRAY_SIZE(trace_clocks))
6529 return -EINVAL;
6530
Zhaolei5079f322009-08-25 16:12:56 +08006531 mutex_lock(&trace_types_lock);
6532
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006533 tr->clock_id = i;
6534
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006535 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006536
David Sharp60303ed2012-10-11 16:27:52 -07006537 /*
6538 * New clock may not be consistent with the previous clock.
6539 * Reset the buffer so that it doesn't have incomparable timestamps.
6540 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006541 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006542
6543#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006544 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006545 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006546 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006547#endif
David Sharp60303ed2012-10-11 16:27:52 -07006548
Zhaolei5079f322009-08-25 16:12:56 +08006549 mutex_unlock(&trace_types_lock);
6550
Steven Rostedte1e232c2014-02-10 23:38:46 -05006551 return 0;
6552}
6553
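/*
 * The trace_clock file lists the available clocks with the active one
 * bracketed, e.g. "[local] global counter uptime perf mono ...", and
 * writing a name switches the buffer's timestamp source:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo mono > /sys/kernel/tracing/trace_clock
 *
 * As the comment in tracing_set_clock() notes, the buffers are reset
 * on a switch because timestamps from different clocks do not compare.
 */
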
6554static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6555 size_t cnt, loff_t *fpos)
6556{
6557 struct seq_file *m = filp->private_data;
6558 struct trace_array *tr = m->private;
6559 char buf[64];
6560 const char *clockstr;
6561 int ret;
6562
6563 if (cnt >= sizeof(buf))
6564 return -EINVAL;
6565
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006566 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006567 return -EFAULT;
6568
6569 buf[cnt] = 0;
6570
6571 clockstr = strstrip(buf);
6572
6573 ret = tracing_set_clock(tr, clockstr);
6574 if (ret)
6575 return ret;
6576
Zhaolei5079f322009-08-25 16:12:56 +08006577 *fpos += cnt;
6578
6579 return cnt;
6580}
6581
Li Zefan13f16d22009-12-08 11:16:11 +08006582static int tracing_clock_open(struct inode *inode, struct file *file)
6583{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006584 struct trace_array *tr = inode->i_private;
6585 int ret;
6586
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006587 ret = tracing_check_open_get_tr(tr);
6588 if (ret)
6589 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006590
6591 ret = single_open(file, tracing_clock_show, inode->i_private);
6592 if (ret < 0)
6593 trace_array_put(tr);
6594
6595 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006596}
6597
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006598static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6599{
6600 struct trace_array *tr = m->private;
6601
6602 mutex_lock(&trace_types_lock);
6603
6604 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6605 seq_puts(m, "delta [absolute]\n");
6606 else
6607 seq_puts(m, "[delta] absolute\n");
6608
6609 mutex_unlock(&trace_types_lock);
6610
6611 return 0;
6612}
6613
6614static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6615{
6616 struct trace_array *tr = inode->i_private;
6617 int ret;
6618
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006619 ret = tracing_check_open_get_tr(tr);
6620 if (ret)
6621 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006622
6623 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6624 if (ret < 0)
6625 trace_array_put(tr);
6626
6627 return ret;
6628}
6629
Tom Zanussi00b41452018-01-15 20:51:39 -06006630int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6631{
6632 int ret = 0;
6633
6634 mutex_lock(&trace_types_lock);
6635
6636 if (abs && tr->time_stamp_abs_ref++)
6637 goto out;
6638
6639 if (!abs) {
6640 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6641 ret = -EINVAL;
6642 goto out;
6643 }
6644
6645 if (--tr->time_stamp_abs_ref)
6646 goto out;
6647 }
6648
6649 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6650
6651#ifdef CONFIG_TRACER_MAX_TRACE
6652 if (tr->max_buffer.buffer)
6653 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6654#endif
6655 out:
6656 mutex_unlock(&trace_types_lock);
6657
6658 return ret;
6659}
6660
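/*
 * Absolute timestamps are reference-counted per trace array: each
 * in-kernel user (e.g. a hist trigger needing common_timestamp) takes
 * a reference, and delta mode returns only when the last user drops
 * theirs. The expected pairing, sketched:
 *
 *	tracing_set_time_stamp_abs(tr, true);	(take a reference)
 *	...					(use absolute stamps)
 *	tracing_set_time_stamp_abs(tr, false);	(drop it)
 *
 * The timestamp_mode file above only reports the current mode; it has
 * no write handler.
 */
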
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006661struct ftrace_buffer_info {
6662 struct trace_iterator iter;
6663 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006664 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006665 unsigned int read;
6666};
6667
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006668#ifdef CONFIG_TRACER_SNAPSHOT
6669static int tracing_snapshot_open(struct inode *inode, struct file *file)
6670{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006671 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006672 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006673 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006674 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006675
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006676 ret = tracing_check_open_get_tr(tr);
6677 if (ret)
6678 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006679
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006680 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006681 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006682 if (IS_ERR(iter))
6683 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006684 } else {
6685 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006686 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006687 m = kzalloc(sizeof(*m), GFP_KERNEL);
6688 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006689 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006690 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6691 if (!iter) {
6692 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006693 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006694 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006695 ret = 0;
6696
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006697 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006698 iter->trace_buffer = &tr->max_buffer;
6699 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006700 m->private = iter;
6701 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006702 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006703out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006704 if (ret < 0)
6705 trace_array_put(tr);
6706
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006707 return ret;
6708}
6709
6710static ssize_t
6711tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6712 loff_t *ppos)
6713{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006714 struct seq_file *m = filp->private_data;
6715 struct trace_iterator *iter = m->private;
6716 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006717 unsigned long val;
6718 int ret;
6719
6720 ret = tracing_update_buffers();
6721 if (ret < 0)
6722 return ret;
6723
6724 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6725 if (ret)
6726 return ret;
6727
6728 mutex_lock(&trace_types_lock);
6729
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006730 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006731 ret = -EBUSY;
6732 goto out;
6733 }
6734
Tom Zanussia35873a2019-02-13 17:42:45 -06006735 arch_spin_lock(&tr->max_lock);
6736 if (tr->cond_snapshot)
6737 ret = -EBUSY;
6738 arch_spin_unlock(&tr->max_lock);
6739 if (ret)
6740 goto out;
6741
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006742 switch (val) {
6743 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006744 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6745 ret = -EINVAL;
6746 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006747 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006748 if (tr->allocated_snapshot)
6749 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006750 break;
6751 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006752/* Only allow per-cpu swap if the ring buffer supports it */
6753#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6754 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6755 ret = -EINVAL;
6756 break;
6757 }
6758#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006759 if (tr->allocated_snapshot)
6760 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6761 &tr->trace_buffer, iter->cpu_file);
6762 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006763 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006764 if (ret < 0)
6765 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006766 local_irq_disable();
6767 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006768 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06006769 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006770 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006771 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006772 local_irq_enable();
6773 break;
6774 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006775 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006776 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6777 tracing_reset_online_cpus(&tr->max_buffer);
6778 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04006779 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006780 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006781 break;
6782 }
6783
6784 if (ret >= 0) {
6785 *ppos += cnt;
6786 ret = cnt;
6787 }
6788out:
6789 mutex_unlock(&trace_types_lock);
6790 return ret;
6791}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006792
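/*
 * The snapshot file's write commands map onto the switch above:
 *
 *	echo 0 > snapshot	frees the snapshot buffer (only valid on
 *				the whole-array file, not per-cpu)
 *	echo 1 > snapshot	allocates the buffer if needed and swaps
 *				it with the live buffer
 *	echo 2 > snapshot	(any other value) clears the snapshot
 *				buffer without swapping
 *	cat snapshot		reads the most recent snapshot
 *
 * Per-cpu swaps via per_cpu/cpuN/snapshot additionally require
 * CONFIG_RING_BUFFER_ALLOW_SWAP.
 */
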
6793static int tracing_snapshot_release(struct inode *inode, struct file *file)
6794{
6795 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006796 int ret;
6797
6798 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006799
6800 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006801 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006802
6803 /* If write only, the seq_file is just a stub */
6804 if (m)
6805 kfree(m->private);
6806 kfree(m);
6807
6808 return 0;
6809}
6810
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006811static int tracing_buffers_open(struct inode *inode, struct file *filp);
6812static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6813 size_t count, loff_t *ppos);
6814static int tracing_buffers_release(struct inode *inode, struct file *file);
6815static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6816 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6817
6818static int snapshot_raw_open(struct inode *inode, struct file *filp)
6819{
6820 struct ftrace_buffer_info *info;
6821 int ret;
6822
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04006823 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006824 ret = tracing_buffers_open(inode, filp);
6825 if (ret < 0)
6826 return ret;
6827
6828 info = filp->private_data;
6829
6830 if (info->iter.trace->use_max_tr) {
6831 tracing_buffers_release(inode, filp);
6832 return -EBUSY;
6833 }
6834
6835 info->iter.snapshot = true;
6836 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6837
6838 return ret;
6839}
6840
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006841#endif /* CONFIG_TRACER_SNAPSHOT */
6842
6843
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006844static const struct file_operations tracing_thresh_fops = {
6845 .open = tracing_open_generic,
6846 .read = tracing_thresh_read,
6847 .write = tracing_thresh_write,
6848 .llseek = generic_file_llseek,
6849};
6850
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006851#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006852static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006853 .open = tracing_open_generic,
6854 .read = tracing_max_lat_read,
6855 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006856 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006857};
Chen Gange428abb2015-11-10 05:15:15 +08006858#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006859
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006860static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006861 .open = tracing_open_generic,
6862 .read = tracing_set_trace_read,
6863 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006864 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006865};
6866
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006867static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006868 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006869 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006870 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006871 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006872 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006873 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006874};
6875
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006876static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006877 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006878 .read = tracing_entries_read,
6879 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006880 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006881 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006882};
6883
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006884static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006885 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006886 .read = tracing_total_entries_read,
6887 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006888 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006889};
6890
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006891static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006892 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006893 .write = tracing_free_buffer_write,
6894 .release = tracing_free_buffer_release,
6895};
6896
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006897static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006898 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006899 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006900 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006901 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006902};
6903
Steven Rostedtfa32e852016-07-06 15:25:08 -04006904static const struct file_operations tracing_mark_raw_fops = {
6905 .open = tracing_open_generic_tr,
6906 .write = tracing_mark_raw_write,
6907 .llseek = generic_file_llseek,
6908 .release = tracing_release_generic_tr,
6909};
6910
Zhaolei5079f322009-08-25 16:12:56 +08006911static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006912 .open = tracing_clock_open,
6913 .read = seq_read,
6914 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006915 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006916 .write = tracing_clock_write,
6917};
6918
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006919static const struct file_operations trace_time_stamp_mode_fops = {
6920 .open = tracing_time_stamp_mode_open,
6921 .read = seq_read,
6922 .llseek = seq_lseek,
6923 .release = tracing_single_release_tr,
6924};
6925
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006926#ifdef CONFIG_TRACER_SNAPSHOT
6927static const struct file_operations snapshot_fops = {
6928 .open = tracing_snapshot_open,
6929 .read = seq_read,
6930 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006931 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006932 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006933};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006934
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006935static const struct file_operations snapshot_raw_fops = {
6936 .open = snapshot_raw_open,
6937 .read = tracing_buffers_read,
6938 .release = tracing_buffers_release,
6939 .splice_read = tracing_buffers_splice_read,
6940 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006941};
6942
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006943#endif /* CONFIG_TRACER_SNAPSHOT */
6944
Tom Zanussi8a062902019-03-31 18:48:15 -05006945#define TRACING_LOG_ERRS_MAX 8
6946#define TRACING_LOG_LOC_MAX 128
6947
6948#define CMD_PREFIX " Command: "
6949
6950struct err_info {
6951 const char **errs; /* ptr to loc-specific array of err strings */
6952 u8 type; /* index into errs -> specific err string */
6953 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6954 u64 ts;
6955};
6956
6957struct tracing_log_err {
6958 struct list_head list;
6959 struct err_info info;
6960 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6961 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6962};
6963
Tom Zanussi8a062902019-03-31 18:48:15 -05006964static DEFINE_MUTEX(tracing_err_log_lock);
6965
YueHaibingff585c52019-06-14 23:32:10 +08006966static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05006967{
6968 struct tracing_log_err *err;
6969
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006970 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05006971 err = kzalloc(sizeof(*err), GFP_KERNEL);
6972 if (!err)
6973 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006974 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05006975
6976 return err;
6977 }
6978
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006979 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05006980 list_del(&err->list);
6981
6982 return err;
6983}
6984
6985/**
6986 * err_pos - find the position of a string within a command, for error caret placement
6987 * @cmd: The tracing command that caused the error
6988 * @str: The string to position the caret at within @cmd
6989 *
6990 * Finds the position of the first occurrence of @str within @cmd. The
6991 * return value can be passed to tracing_log_err() for caret placement
6992 * within @cmd.
6993 *
6994 * Returns the index within @cmd of the first occurrence of @str or 0
6995 * if @str was not found.
6996 */
6997unsigned int err_pos(char *cmd, const char *str)
6998{
6999 char *found;
7000
7001 if (WARN_ON(!strlen(cmd)))
7002 return 0;
7003
7004 found = strstr(cmd, str);
7005 if (found)
7006 return found - cmd;
7007
7008 return 0;
7009}
7010
7011/**
7012 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007013 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007014 * @loc: A string describing where the error occurred
7015 * @cmd: The tracing command that caused the error
7016 * @errs: The array of loc-specific static error strings
7017 * @type: The index into errs[], which produces the specific static err string
7018 * @pos: The position the caret should be placed in the cmd
7019 *
7020 * Writes an error into tracing/error_log of the form:
7021 *
7022 * <loc>: error: <text>
7023 * Command: <cmd>
7024 * ^
7025 *
7026 * tracing/error_log is a small log file containing the last
7027 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7028 * unless there has been a tracing error, and the error log can be
7029 * cleared, and its memory freed, by writing the empty string to it in
7030 * truncation mode, i.e. echo > tracing/error_log.
7031 *
7032 * NOTE: the @errs array along with the @type param are used to
7033 * produce a static error string - this string is not copied and saved
7034 * when the error is logged - only a pointer to it is saved. See
7035 * existing callers for examples of how static strings are typically
7036 * defined for use with tracing_log_err().
7037 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007038void tracing_log_err(struct trace_array *tr,
7039 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007040 const char **errs, u8 type, u8 pos)
7041{
7042 struct tracing_log_err *err;
7043
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007044 if (!tr)
7045 tr = &global_trace;
7046
Tom Zanussi8a062902019-03-31 18:48:15 -05007047 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007048 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007049 if (PTR_ERR(err) == -ENOMEM) {
7050 mutex_unlock(&tracing_err_log_lock);
7051 return;
7052 }
7053
7054 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7055	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7056
7057 err->info.errs = errs;
7058 err->info.type = type;
7059 err->info.pos = pos;
7060 err->info.ts = local_clock();
7061
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007062 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007063 mutex_unlock(&tracing_err_log_lock);
7064}
7065
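/*
 * A logged error renders in tracing/error_log roughly like this
 * (hypothetical hist-trigger failure shown):
 *
 *	[ 1234.567890] hist:sched:sched_wakeup: error: Couldn't find field
 *	 Command: keys=bogus_field
 *	               ^
 *
 * Only the last TRACING_LOG_ERRS_MAX entries are kept; older ones are
 * recycled by get_tracing_log_err(). Truncating the file drops them
 * all:
 *
 *	echo > /sys/kernel/tracing/error_log
 */
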
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007066static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007067{
7068 struct tracing_log_err *err, *next;
7069
7070 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007071 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007072 list_del(&err->list);
7073 kfree(err);
7074 }
7075
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007076 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007077 mutex_unlock(&tracing_err_log_lock);
7078}
7079
7080static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7081{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007082 struct trace_array *tr = m->private;
7083
Tom Zanussi8a062902019-03-31 18:48:15 -05007084 mutex_lock(&tracing_err_log_lock);
7085
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007086 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007087}
7088
7089static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7090{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007091 struct trace_array *tr = m->private;
7092
7093 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007094}
7095
7096static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7097{
7098 mutex_unlock(&tracing_err_log_lock);
7099}
7100
7101static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7102{
7103 u8 i;
7104
7105 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7106 seq_putc(m, ' ');
7107 for (i = 0; i < pos; i++)
7108 seq_putc(m, ' ');
7109 seq_puts(m, "^\n");
7110}
7111
7112static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7113{
7114 struct tracing_log_err *err = v;
7115
7116 if (err) {
7117 const char *err_text = err->info.errs[err->info.type];
7118 u64 sec = err->info.ts;
7119 u32 nsec;
7120
7121 nsec = do_div(sec, NSEC_PER_SEC);
7122 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7123 err->loc, err_text);
7124 seq_printf(m, "%s", err->cmd);
7125 tracing_err_log_show_pos(m, err->info.pos);
7126 }
7127
7128 return 0;
7129}
7130
7131static const struct seq_operations tracing_err_log_seq_ops = {
7132 .start = tracing_err_log_seq_start,
7133 .next = tracing_err_log_seq_next,
7134 .stop = tracing_err_log_seq_stop,
7135 .show = tracing_err_log_seq_show
7136};
7137
7138static int tracing_err_log_open(struct inode *inode, struct file *file)
7139{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007140 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007141 int ret = 0;
7142
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007143 ret = tracing_check_open_get_tr(tr);
7144 if (ret)
7145 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007146
Tom Zanussi8a062902019-03-31 18:48:15 -05007147 /* If this file was opened for write, then erase contents */
7148 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007149 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007150
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007151 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007152 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007153 if (!ret) {
7154 struct seq_file *m = file->private_data;
7155 m->private = tr;
7156 } else {
7157 trace_array_put(tr);
7158 }
7159 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007160 return ret;
7161}
7162
7163static ssize_t tracing_err_log_write(struct file *file,
7164 const char __user *buffer,
7165 size_t count, loff_t *ppos)
7166{
7167 return count;
7168}
7169
Takeshi Misawad122ed62019-06-28 19:56:40 +09007170static int tracing_err_log_release(struct inode *inode, struct file *file)
7171{
7172 struct trace_array *tr = inode->i_private;
7173
7174 trace_array_put(tr);
7175
7176 if (file->f_mode & FMODE_READ)
7177 seq_release(inode, file);
7178
7179 return 0;
7180}
7181
Tom Zanussi8a062902019-03-31 18:48:15 -05007182static const struct file_operations tracing_err_log_fops = {
7183 .open = tracing_err_log_open,
7184 .write = tracing_err_log_write,
7185 .read = seq_read,
7186 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007187 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007188};
7189
Steven Rostedt2cadf912008-12-01 22:20:19 -05007190static int tracing_buffers_open(struct inode *inode, struct file *filp)
7191{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007192 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007193 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007194 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007195
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007196 ret = tracing_check_open_get_tr(tr);
7197 if (ret)
7198 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007199
Steven Rostedt2cadf912008-12-01 22:20:19 -05007200 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007201 if (!info) {
7202 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007203 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007204 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007205
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007206 mutex_lock(&trace_types_lock);
7207
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007208 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007209 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007210 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007211 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007212 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007213 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007214 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007215
7216 filp->private_data = info;
7217
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007218 tr->current_trace->ref++;
7219
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007220 mutex_unlock(&trace_types_lock);
7221
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007222 ret = nonseekable_open(inode, filp);
7223 if (ret < 0)
7224 trace_array_put(tr);
7225
7226 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007227}
7228
Al Viro9dd95742017-07-03 00:42:43 -04007229static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007230tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7231{
7232 struct ftrace_buffer_info *info = filp->private_data;
7233 struct trace_iterator *iter = &info->iter;
7234
7235 return trace_poll(iter, filp, poll_table);
7236}
7237
Steven Rostedt2cadf912008-12-01 22:20:19 -05007238static ssize_t
7239tracing_buffers_read(struct file *filp, char __user *ubuf,
7240 size_t count, loff_t *ppos)
7241{
7242 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007243 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007244 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007245 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007246
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007247 if (!count)
7248 return 0;
7249
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007250#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007251 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7252 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007253#endif
7254
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007255 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007256 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7257 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007258 if (IS_ERR(info->spare)) {
7259 ret = PTR_ERR(info->spare);
7260 info->spare = NULL;
7261 } else {
7262 info->spare_cpu = iter->cpu_file;
7263 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007264 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007265 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007266 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007267
Steven Rostedt2cadf912008-12-01 22:20:19 -05007268 /* Do we have previous read data to read? */
7269 if (info->read < PAGE_SIZE)
7270 goto read;
7271
Steven Rostedtb6273442013-02-28 13:44:11 -05007272 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007273 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007274 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007275 &info->spare,
7276 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007277 iter->cpu_file, 0);
7278 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007279
7280 if (ret < 0) {
7281 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007282 if ((filp->f_flags & O_NONBLOCK))
7283 return -EAGAIN;
7284
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007285 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007286 if (ret)
7287 return ret;
7288
Steven Rostedtb6273442013-02-28 13:44:11 -05007289 goto again;
7290 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007291 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007292 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007293
Steven Rostedt436fc282011-10-14 10:44:25 -04007294 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007295 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007296 size = PAGE_SIZE - info->read;
7297 if (size > count)
7298 size = count;
7299
7300 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007301 if (ret == size)
7302 return -EFAULT;
7303
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007304 size -= ret;
7305
Steven Rostedt2cadf912008-12-01 22:20:19 -05007306 *ppos += size;
7307 info->read += size;
7308
7309 return size;
7310}
7311
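/*
 * This read backs the per_cpu/cpuN/trace_pipe_raw files: it returns
 * raw ring-buffer pages rather than formatted text, so a consumer
 * reads page-sized chunks and parses the binary page layout itself
 * (as trace-cmd does). A hedged sketch, assuming 4K pages and a
 * hypothetical parser:
 *
 *	char page[4096];
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *
 *	while (read(fd, page, sizeof(page)) > 0)
 *		parse_subbuf(page);
 */
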
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	refcount_t		refcount;
};

static void buffer_ref_release(struct buffer_ref *ref)
{
	if (!refcount_dec_and_test(&ref->refcount))
		return;
	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
}

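/*
 * Note: a buffer_ref pins one ring-buffer page while it is shared with
 * a pipe. Every pipe_buffer holding the page takes a reference; only
 * the final put above hands the page back to the ring buffer through
 * ring_buffer_free_read_page().
 */
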
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	buffer_ref_release(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (refcount_read(&ref->refcount) > INT_MAX/2)
		return false;

	refcount_inc(&ref->refcount);
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_nosteal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

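/*
 * Illustrative usage sketch, not part of this file: the splice_read
 * above lets user space move ring-buffer pages without copying, by way
 * of a pipe; each spliced page is pinned by a buffer_ref. Error
 * handling is omitted.
 *
 *	int in = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	int out = open("trace.dat", O_WRONLY | O_CREAT, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(in, NULL, pfd[1], NULL, 4096, 0);
 *	splice(pfd[0], NULL, out, NULL, 4096, 0);
 */
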
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

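/*
 * The per_cpu/cpuN/stats file produced above reads back one counter per
 * line; the values below are made up for illustration:
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65560
 *	oldest event ts:  5263.000012
 *	now ts:  5265.249912
 *	dropped events: 0
 *	read events: 128
 */
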
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

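/*
 * The "snapshot" command registered above is driven through
 * set_ftrace_filter, for example:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# snapshot on every hit
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	# only the first 5 hits
 *	echo '!schedule:snapshot' > set_ftrace_filter	# remove the probe
 */
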
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * The pointer itself holds the address of the index entry, and that
 * entry's value is the index (remember index[1] == 1).
 *
 * Then, by subtracting that index from the pointer, we get back to the
 * start of the index array:
 *
 *   ptr - idx == &index[0]
 *
 * A simple container_of() from that pointer then gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

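/*
 * Concrete example of the recovery above: if data points at
 * tr->trace_flags_index[3], then *data == 3, so data - 3 is
 * &tr->trace_flags_index[0], from which container_of() yields tr.
 */
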
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

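/*
 * The callbacks above back the "tracing_on" file:
 *
 *	echo 0 > tracing_on	# stop recording into the ring buffer
 *	echo 1 > tracing_on	# resume recording
 *
 * Writing the value that is already set falls into the "do nothing"
 * branch, while a real transition also runs the current tracer's
 * start()/stop() hooks.
 */
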
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

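/*
 * "buffer_percent" sets how full the ring buffer must be before a
 * blocked reader is woken (it is passed to wait_on_pipe() in
 * tracing_buffers_splice_read() above). For example:
 *
 *	echo 50 > buffer_percent	# wake readers at half full
 *
 * Note that this version clamps a write of 0 up to 1.
 */
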
static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return tr;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(trace_array_create);

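/*
 * Illustrative sketch, not part of this file: the exported pair
 * trace_array_create()/trace_array_destroy() gives a module its own
 * tracefs instance. "sample_instance" is an arbitrary name and error
 * handling is abbreviated.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_create("sample_instance");
 *	if (IS_ERR(tr))
 *		return PTR_ERR(tr);
 *	// ... write into the instance, e.g. via trace_array_printk() ...
 *	trace_array_destroy(tr);
 */
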
Divya Indif45d1222019-03-20 11:28:51 -07008434static int instance_mkdir(const char *name)
8435{
8436 return PTR_ERR_OR_ZERO(trace_array_create(name));
Steven Rostedt277ba042012-08-03 16:10:49 -04008437}
8438
Divya Indif45d1222019-03-20 11:28:51 -07008439static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008440{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008441 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008442
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05008443 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Divya Indif45d1222019-03-20 11:28:51 -07008444 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008445
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008446 list_del(&tr->list);
8447
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008448 /* Disable all the flags that were enabled coming in */
8449 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8450 if ((1 << i) & ZEROED_TRACE_FLAGS)
8451 set_tracer_flag(tr, 1 << i, 0);
8452 }
8453
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008454 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308455 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008456 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008457 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008458 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08008459 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008460 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008461
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008462 for (i = 0; i < tr->nr_topts; i++) {
8463 kfree(tr->topts[i].topts);
8464 }
8465 kfree(tr->topts);
8466
Chunyu Hudb9108e02017-07-20 18:36:09 +08008467 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008468 kfree(tr->name);
8469 kfree(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008470 tr = NULL;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008471
Divya Indif45d1222019-03-20 11:28:51 -07008472 return 0;
8473}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008474
Divya Indif45d1222019-03-20 11:28:51 -07008475int trace_array_destroy(struct trace_array *tr)
8476{
8477 int ret;
8478
8479 if (!tr)
8480 return -EINVAL;
8481
8482 mutex_lock(&event_mutex);
8483 mutex_lock(&trace_types_lock);
8484
8485 ret = __remove_instance(tr);
8486
8487 mutex_unlock(&trace_types_lock);
8488 mutex_unlock(&event_mutex);
8489
8490 return ret;
8491}
8492EXPORT_SYMBOL_GPL(trace_array_destroy);
8493
8494static int instance_rmdir(const char *name)
8495{
8496 struct trace_array *tr;
8497 int ret;
8498
8499 mutex_lock(&event_mutex);
8500 mutex_lock(&trace_types_lock);
8501
8502 ret = -ENODEV;
8503 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8504 if (tr->name && strcmp(tr->name, name) == 0) {
8505 ret = __remove_instance(tr);
8506 break;
8507 }
8508 }
8509
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008510 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008511 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008512
8513 return ret;
8514}
8515
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

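/*
 * Populate a trace array's tracefs directory (the top level directory
 * or one created under instances/) with the standard control files.
 */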
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() from the boot-up code and
 * returns the dentry of the top level tracing directory. A NULL return
 * means the top level directory itself (files are then created in the
 * tracefs root); an ERR_PTR is returned when tracefs is not ready.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

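/*
 * Register the eval maps (enum/sizeof-to-value translations) that were
 * built into the core kernel, delimited by the linker symbols above.
 */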
static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created; do not
	 * bother with their eval maps either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
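/*
 * Unlink and free the block of eval maps that the module being removed
 * contributed to the trace_eval_maps list.
 */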
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

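/* Add or remove a module's eval maps as the module comes and goes. */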
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

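/*
 * Called via fs_initcall(): create the top level tracefs files once
 * the tracefs file system is available.
 */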
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

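/*
 * Panic/die notifiers: dump the ring buffer to the console when the
 * kernel crashes, honoring the ftrace_dump_on_oops setting.
 */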
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to 1024 bytes; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

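/*
 * Print the contents of a trace_seq to the console at KERN_TRACE level
 * and reinitialize it. Used by the dump-on-oops path below.
 */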
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should already be NUL-terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

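/*
 * Set up an iterator over the global trace buffer for use outside the
 * normal file interfaces, e.g. by ftrace_dump().
 */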
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

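/**
 * ftrace_dump - dump the ring buffer(s) to the console
 * @oops_dump_mode: DUMP_ALL for every CPU's buffer, DUMP_ORIG for the
 *	current CPU only, or DUMP_NONE
 *
 * Meant for crash paths; tracing is turned off on entry and is not
 * re-enabled (sysrq-z users can re-enable it via tracing_on).
 */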
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all we can read, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

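/*
 * Split @buf into an argc/argv pair and hand it to @createfn. Shared
 * helper for the dynamic event interfaces (e.g. kprobe events).
 */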
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE		4096

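/*
 * Copy a command string from user space in WRITE_BUFSIZE chunks, strip
 * '#' comments, and run @createfn on each newline-terminated line.
 */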
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

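/*
 * Allocate the global trace buffers and bootstrap the tracing state.
 * Runs early in boot, well before tracefs files can be created.
 */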
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer.
	 * We don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

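/*
 * Called from start_kernel() very early in boot, so that boot-time
 * tracing (and the tp_printk option) is set up before most subsystems.
 */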
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init section.
	 * This function is called via late_initcall. If the boot tracer
	 * was never found and registered, clear the pointer out here, to
	 * prevent later registrations from accessing memory that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
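/*
 * The default "local" trace clock can drift between CPUs when
 * sched_clock() is unstable; in that case switch the default to the
 * (ordered but slower) "global" clock, unless the user chose a clock
 * on the command line.
 */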
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif