// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
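
/*
 * Illustrative layout sketch (not part of the original source): for a
 * module that registers N eval maps, one saved array looks roughly like
 *
 *	item[0].head	= { .mod = mod, .length = N }
 *	item[1..N].map	= the individual trace_eval_map entries
 *	item[N+1].tail	= { .next = <next saved array or NULL> }
 *
 * so a walker can start at item[1] and, after head.length entries,
 * follow tail.next to the next saved array.
 */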
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct ring_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

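/*
 * Example (illustrative, not part of the original source): the __setup
 * handlers above make a kernel command line such as
 *
 *	ftrace=function alloc_snapshot trace_options=sym-offset tp_printk
 *
 * select the function tracer at boot, pre-allocate the snapshot buffer,
 * apply trace options early, and pipe tracepoints to printk.
 */
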
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

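/*
 * Worked example (illustrative, not part of the original source): the
 * "+ 500" rounds to the nearest microsecond instead of truncating:
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2, since do_div()
 * itself truncates.
 */
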
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	   TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

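/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that must keep the trace array alive across an operation that can
 * sleep brackets it with the get/put pair:
 *
 *	if (trace_array_get(tr) == 0) {
 *		do_something_with(tr);		// hypothetical helper
 *		trace_array_put(tr);
 *	}
 */
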
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

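/*
 * Illustrative sketch (not part of the original source): fork and exit
 * handlers are expected to call this as
 *
 *	trace_filter_add_remove_task(pid_list, parent, child);	// on fork
 *	trace_filter_add_remove_task(pid_list, NULL, task);	// on exit
 *
 * so a child is tracked only when its parent already is, and a task is
 * dropped from the list when it exits.
 */
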
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

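/*
 * Illustrative sketch (not part of the original source): the three
 * helpers above are meant to back a seq_file iterator, roughly as
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);	// pid_list lookup elided
 *	}
 *
 *	static const struct seq_operations pid_sops = {
 *		.start = p_start,
 *		.next  = ...,	// wraps trace_pid_next()
 *		.stop  = ...,
 *		.show  = trace_pid_show,
 *	};
 */
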
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

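/*
 * Example (illustrative, not part of the original source): a write of
 * "123 456" to a pid filter file arrives here via @ubuf. Both pids are
 * parsed and set in a freshly allocated bitmap, along with any pids
 * carried over from @filtered_pids; the caller then swaps in
 * *new_pid_list. An empty write leaves *new_pid_list NULL, clearing
 * the filter.
 */
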
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * configured both at boot time and at run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *	(not reader page) in ring buffer, and this page will be rewritten
 *	by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *	and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

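/*
 * Illustrative usage sketch (not part of the original source): a reader
 * consuming one CPU's buffer takes the per-cpu side of the lock, while
 * an operation touching every buffer passes RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	...			// consume events from that cpu's buffer
 *	trace_access_unlock(cpu);
 */
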
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

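/*
 * Illustrative note (not part of the original source): callers normally
 * reach these through the trace_puts() macro, which picks __trace_bputs()
 * for build-time constant strings (storing only the pointer) and falls
 * back to
 *
 *	__trace_puts(_THIS_IP_, str, strlen(str));
 *
 * for runtime strings, which copies the text into the ring buffer.
 */
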
#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

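/*
 * Illustrative usage (not part of the original source): a debugging
 * hack in sleepable kernel code can capture the moment a rare
 * condition hits:
 *
 *	if (rare_condition)		// hypothetical condition
 *		tracing_snapshot_alloc();
 *
 * The first call allocates the spare buffer and swaps it with the live
 * one; later calls only swap.
 */
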
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

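/*
 * Illustrative expansion (not part of the original source): with a
 * hypothetical
 *
 *	#define TRACE_FLAGS C(PRINT_PARENT, "print-parent"), C(SYM_OFFSET, "sym-offset"),
 *
 * the "#define C(a, b) b" above makes trace_options[] expand to
 *
 *	{ "print-parent", "sym-offset", NULL };
 *
 * while trace.h redefines C(a, b) to build the matching enum bits.
 */
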
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001352/*
1353 * trace_parser_get_init - allocates the buffer for the trace parser
1354 */
1355int trace_parser_get_init(struct trace_parser *parser, int size)
1356{
1357 memset(parser, 0, sizeof(*parser));
1358
1359 parser->buffer = kmalloc(size, GFP_KERNEL);
1360 if (!parser->buffer)
1361 return 1;
1362
1363 parser->size = size;
1364 return 0;
1365}
1366
1367/*
1368 * trace_parser_put - frees the buffer for the trace parser
1369 */
1370void trace_parser_put(struct trace_parser *parser)
1371{
1372 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001373 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001374}
1375
1376/*
1377 * trace_get_user - reads the user input string separated by space
1378 * (matched by isspace(ch))
1379 *
1380 * For each string found, the 'struct trace_parser' is updated,
1381 * and the function returns.
1382 *
1383 * Returns number of bytes read.
1384 *
1385 * See kernel/trace/trace.h for 'struct trace_parser' details.
1386 */
1387int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1388 size_t cnt, loff_t *ppos)
1389{
1390 char ch;
1391 size_t read = 0;
1392 ssize_t ret;
1393
1394 if (!*ppos)
1395 trace_parser_clear(parser);
1396
1397 ret = get_user(ch, ubuf++);
1398 if (ret)
1399 goto out;
1400
1401 read++;
1402 cnt--;
1403
1404 /*
1405	 * If the parser is not finished with the last write,
1406	 * continue reading the user input without skipping spaces.
1407 */
1408 if (!parser->cont) {
1409 /* skip white space */
1410 while (cnt && isspace(ch)) {
1411 ret = get_user(ch, ubuf++);
1412 if (ret)
1413 goto out;
1414 read++;
1415 cnt--;
1416 }
1417
Changbin Du76638d92018-01-16 17:02:29 +08001418 parser->idx = 0;
1419
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001420 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001421 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001422 *ppos += read;
1423 ret = read;
1424 goto out;
1425 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001426 }
1427
1428 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001429 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001430 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001431 parser->buffer[parser->idx++] = ch;
1432 else {
1433 ret = -EINVAL;
1434 goto out;
1435 }
1436 ret = get_user(ch, ubuf++);
1437 if (ret)
1438 goto out;
1439 read++;
1440 cnt--;
1441 }
1442
1443	/* Either we have a complete token or we must wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001444 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001445 parser->buffer[parser->idx] = 0;
1446 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001447 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001448 parser->cont = true;
1449 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001450 /* Make sure the parsed string always terminates with '\0'. */
1451 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001452 } else {
1453 ret = -EINVAL;
1454 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001455 }
1456
1457 *ppos += read;
1458 ret = read;
1459
1460out:
1461 return ret;
1462}
1463
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001464/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001465static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001466{
1467 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001468
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001469 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001470 return -EBUSY;
1471
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001472 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001473 if (cnt > len)
1474 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001475 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001476
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001477 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001478 return cnt;
1479}
1480
Tim Bird0e950172010-02-25 15:36:43 -08001481unsigned long __read_mostly tracing_thresh;
1482
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001483#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001484/*
1485 * Copy the new maximum trace into the separate maximum-trace
1486	 * structure. (This way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001487 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001488 */
1489static void
1490__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1491{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001492 struct trace_buffer *trace_buf = &tr->trace_buffer;
1493 struct trace_buffer *max_buf = &tr->max_buffer;
1494 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1495 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001496
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001497 max_buf->cpu = cpu;
1498 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001499
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001500 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001501 max_data->critical_start = data->critical_start;
1502 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001503
Tom Zanussi85f726a2019-03-05 10:12:00 -06001504 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001505 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001506 /*
1507 * If tsk == current, then use current_uid(), as that does not use
1508 * RCU. The irq tracer can be called out of RCU scope.
1509 */
1510 if (tsk == current)
1511 max_data->uid = current_uid();
1512 else
1513 max_data->uid = task_uid(tsk);
1514
Steven Rostedt8248ac02009-09-02 12:27:41 -04001515 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1516 max_data->policy = tsk->policy;
1517 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001518
1519 /* record this tasks comm */
1520 tracing_record_cmdline(tsk);
1521}
1522
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001523/**
1524 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1525 * @tr: tracer
1526 * @tsk: the task with the latency
1527 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001528 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001529 *
1530 * Flip the buffers between the @tr and the max_tr and record information
1531 * about which task was the cause of this latency.
1532 */
Ingo Molnare309b412008-05-12 21:20:51 +02001533void
Tom Zanussia35873a2019-02-13 17:42:45 -06001534update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1535 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001536{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001537 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001538 return;
1539
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001540 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001541
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001542 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001543 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001544 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001545 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001546 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001547
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001548 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001549
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001550 /* Inherit the recordable setting from trace_buffer */
1551 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1552 ring_buffer_record_on(tr->max_buffer.buffer);
1553 else
1554 ring_buffer_record_off(tr->max_buffer.buffer);
1555
Tom Zanussia35873a2019-02-13 17:42:45 -06001556#ifdef CONFIG_TRACER_SNAPSHOT
1557 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1558 goto out_unlock;
1559#endif
Gustavo A. R. Silva08ae88f2018-02-09 11:53:16 -06001560 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001561
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001563
1564 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001565 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566}
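
/*
 * Caller sketch (cf. the wakeup and irqsoff tracers): when a tracer
 * measures a new maximum, it records it and swaps in the snapshot:
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id(), NULL);
 *	}
 */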
1567
1568/**
1569 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001570 * @tr: tracer
1571 * @tsk: task with the latency
1572 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001573 *
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001575 */
Ingo Molnare309b412008-05-12 21:20:51 +02001576void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1578{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001579 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001581 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001582 return;
1583
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001584 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001585 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001586 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001587 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001588 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001589 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001590
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001591 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001592
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001593 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001594
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001595 if (ret == -EBUSY) {
1596 /*
1597 * We failed to swap the buffer due to a commit taking
1598 * place on this CPU. We fail to record, but we reset
1599 * the max trace buffer (no one writes directly to it)
1600 * and flag that it failed.
1601 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001602 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001603 "Failed to swap buffers due to commit in progress\n");
1604 }
1605
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001606 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001607
1608 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001609 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001610}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001611#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001612
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001613static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001614{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001615 /* Iterators are static, they should be filled or empty */
1616 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001617 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001618
Rabin Vincente30f53a2014-11-10 19:46:34 +01001619 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1620 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001621}
1622
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001623#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001624static bool selftests_can_run;
1625
1626struct trace_selftests {
1627 struct list_head list;
1628 struct tracer *type;
1629};
1630
1631static LIST_HEAD(postponed_selftests);
1632
1633static int save_selftest(struct tracer *type)
1634{
1635 struct trace_selftests *selftest;
1636
1637 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1638 if (!selftest)
1639 return -ENOMEM;
1640
1641 selftest->type = type;
1642 list_add(&selftest->list, &postponed_selftests);
1643 return 0;
1644}
1645
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001646static int run_tracer_selftest(struct tracer *type)
1647{
1648 struct trace_array *tr = &global_trace;
1649 struct tracer *saved_tracer = tr->current_trace;
1650 int ret;
1651
1652 if (!type->selftest || tracing_selftest_disabled)
1653 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001654
1655 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001656 * If a tracer registers early in boot up (before scheduling is
1657 * initialized and such), then do not run its selftests yet.
1658	 * Instead, run them a little later in the boot process.
1659 */
1660 if (!selftests_can_run)
1661 return save_selftest(type);
1662
1663 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001664 * Run a selftest on this tracer.
1665 * Here we reset the trace buffer, and set the current
1666 * tracer to be this tracer. The tracer can then run some
1667 * internal tracing to verify that everything is in order.
1668 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001669 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001670 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001671
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001672 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001673
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001674#ifdef CONFIG_TRACER_MAX_TRACE
1675 if (type->use_max_tr) {
1676 /* If we expanded the buffers, make sure the max is expanded too */
1677 if (ring_buffer_expanded)
1678 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1679 RING_BUFFER_ALL_CPUS);
1680 tr->allocated_snapshot = true;
1681 }
1682#endif
1683
1684 /* the test is responsible for initializing and enabling */
1685 pr_info("Testing tracer %s: ", type->name);
1686 ret = type->selftest(type, tr);
1687 /* the test is responsible for resetting too */
1688 tr->current_trace = saved_tracer;
1689 if (ret) {
1690 printk(KERN_CONT "FAILED!\n");
1691 /* Add the warning after printing 'FAILED' */
1692 WARN_ON(1);
1693 return -1;
1694 }
1695 /* Only reset on passing, to avoid touching corrupted buffers */
1696 tracing_reset_online_cpus(&tr->trace_buffer);
1697
1698#ifdef CONFIG_TRACER_MAX_TRACE
1699 if (type->use_max_tr) {
1700 tr->allocated_snapshot = false;
1701
1702 /* Shrink the max buffer again */
1703 if (ring_buffer_expanded)
1704 ring_buffer_resize(tr->max_buffer.buffer, 1,
1705 RING_BUFFER_ALL_CPUS);
1706 }
1707#endif
1708
1709 printk(KERN_CONT "PASSED\n");
1710 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001711}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001712
1713static __init int init_trace_selftests(void)
1714{
1715 struct trace_selftests *p, *n;
1716 struct tracer *t, **last;
1717 int ret;
1718
1719 selftests_can_run = true;
1720
1721 mutex_lock(&trace_types_lock);
1722
1723 if (list_empty(&postponed_selftests))
1724 goto out;
1725
1726 pr_info("Running postponed tracer tests:\n");
1727
1728 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001729 /* This loop can take minutes when sanitizers are enabled, so
1730	 * let's make sure we allow RCU processing.
1731 */
1732 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001733 ret = run_tracer_selftest(p->type);
1734 /* If the test fails, then warn and remove from available_tracers */
1735 if (ret < 0) {
1736 WARN(1, "tracer: %s failed selftest, disabling\n",
1737 p->type->name);
1738 last = &trace_types;
1739 for (t = trace_types; t; t = t->next) {
1740 if (t == p->type) {
1741 *last = t->next;
1742 break;
1743 }
1744 last = &t->next;
1745 }
1746 }
1747 list_del(&p->list);
1748 kfree(p);
1749 }
1750
1751 out:
1752 mutex_unlock(&trace_types_lock);
1753
1754 return 0;
1755}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001756core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001757#else
1758static inline int run_tracer_selftest(struct tracer *type)
1759{
1760 return 0;
1761}
1762#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001763
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001764static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1765
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001766static void __init apply_trace_boot_options(void);
1767
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001768/**
1769 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001770 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001771 *
1772 * Register a new plugin tracer.
1773 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001774int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001775{
1776 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001777 int ret = 0;
1778
1779 if (!type->name) {
1780 pr_info("Tracer must have a name\n");
1781 return -1;
1782 }
1783
Dan Carpenter24a461d2010-07-10 12:06:44 +02001784 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001785 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1786 return -1;
1787 }
1788
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001789 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001790
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001791 tracing_selftest_running = true;
1792
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001793 for (t = trace_types; t; t = t->next) {
1794 if (strcmp(type->name, t->name) == 0) {
1795 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001796 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001797 type->name);
1798 ret = -1;
1799 goto out;
1800 }
1801 }
1802
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001803 if (!type->set_flag)
1804 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001805 if (!type->flags) {
1806 /*allocate a dummy tracer_flags*/
1807 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001808 if (!type->flags) {
1809 ret = -ENOMEM;
1810 goto out;
1811 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001812 type->flags->val = 0;
1813 type->flags->opts = dummy_tracer_opt;
1814 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001815 if (!type->flags->opts)
1816 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001817
Chunyu Hud39cdd22016-03-08 21:37:01 +08001818 /* store the tracer for __set_tracer_option */
1819 type->flags->trace = type;
1820
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001821 ret = run_tracer_selftest(type);
1822 if (ret < 0)
1823 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001824
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001825 type->next = trace_types;
1826 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001827 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001828
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001829 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001830 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001831 mutex_unlock(&trace_types_lock);
1832
Steven Rostedtdac74942009-02-05 01:13:38 -05001833 if (ret || !default_bootup_tracer)
1834 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001835
Li Zefanee6c2c12009-09-18 14:06:47 +08001836 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001837 goto out_unlock;
1838
1839 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1840 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001841 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001842 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001843
1844 apply_trace_boot_options();
1845
Steven Rostedtdac74942009-02-05 01:13:38 -05001846 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001847 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001848#ifdef CONFIG_FTRACE_STARTUP_TEST
1849 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1850 type->name);
1851#endif
1852
1853 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001854 return ret;
1855}
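
/*
 * Registration sketch (illustrative): my_tracer_init() and
 * my_tracer_reset() are assumed to be supplied by the tracer:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */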
1856
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04001857static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001858{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001859 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001860
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001861 if (!buffer)
1862 return;
1863
Steven Rostedtf6339032009-09-04 12:35:16 -04001864 ring_buffer_record_disable(buffer);
1865
1866 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001867 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001868 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001869
1870 ring_buffer_record_enable(buffer);
1871}
1872
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001873void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001874{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001875 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001876 int cpu;
1877
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001878 if (!buffer)
1879 return;
1880
Steven Rostedt621968c2009-09-04 12:02:35 -04001881 ring_buffer_record_disable(buffer);
1882
1883 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001884 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04001885
Alexander Z Lam94571582013-08-02 18:36:16 -07001886 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001887
1888 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001889 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001890
1891 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001892}
1893
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001894/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001895void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001896{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001897 struct trace_array *tr;
1898
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001899 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001900 if (!tr->clear_trace)
1901 continue;
1902 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001903 tracing_reset_online_cpus(&tr->trace_buffer);
1904#ifdef CONFIG_TRACER_MAX_TRACE
1905 tracing_reset_online_cpus(&tr->max_buffer);
1906#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001907 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001908}
1909
Joel Fernandesd914ba32017-06-26 19:01:55 -07001910static int *tgid_map;
1911
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001912#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001913#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001914static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001915struct saved_cmdlines_buffer {
1916 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1917 unsigned *map_cmdline_to_pid;
1918 unsigned cmdline_num;
1919 int cmdline_idx;
1920 char *saved_cmdlines;
1921};
1922static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001923
Steven Rostedt25b0b442008-05-12 21:21:00 +02001924/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001925static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001926
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001927static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001928{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001929 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1930}
1931
1932static inline void set_cmdline(int idx, const char *cmdline)
1933{
Tom Zanussi85f726a2019-03-05 10:12:00 -06001934 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001935}
1936
1937static int allocate_cmdlines_buffer(unsigned int val,
1938 struct saved_cmdlines_buffer *s)
1939{
Kees Cook6da2ec52018-06-12 13:55:00 -07001940 s->map_cmdline_to_pid = kmalloc_array(val,
1941 sizeof(*s->map_cmdline_to_pid),
1942 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001943 if (!s->map_cmdline_to_pid)
1944 return -ENOMEM;
1945
Kees Cook6da2ec52018-06-12 13:55:00 -07001946 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001947 if (!s->saved_cmdlines) {
1948 kfree(s->map_cmdline_to_pid);
1949 return -ENOMEM;
1950 }
1951
1952 s->cmdline_idx = 0;
1953 s->cmdline_num = val;
1954 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1955 sizeof(s->map_pid_to_cmdline));
1956 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1957 val * sizeof(*s->map_cmdline_to_pid));
1958
1959 return 0;
1960}
1961
1962static int trace_create_savedcmd(void)
1963{
1964 int ret;
1965
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001966 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001967 if (!savedcmd)
1968 return -ENOMEM;
1969
1970 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1971 if (ret < 0) {
1972 kfree(savedcmd);
1973 savedcmd = NULL;
1974 return -ENOMEM;
1975 }
1976
1977 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001978}
1979
Carsten Emdeb5130b12009-09-13 01:43:07 +02001980int is_tracing_stopped(void)
1981{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001982 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001983}
1984
Steven Rostedt0f048702008-11-05 16:05:44 -05001985/**
1986 * tracing_start - quick start of the tracer
1987 *
1988 * If tracing is enabled but was stopped by tracing_stop,
1989 * this will start the tracer back up.
1990 */
1991void tracing_start(void)
1992{
1993 struct ring_buffer *buffer;
1994 unsigned long flags;
1995
1996 if (tracing_disabled)
1997 return;
1998
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001999 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2000 if (--global_trace.stop_count) {
2001 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002002 /* Someone screwed up their debugging */
2003 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002004 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002005 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002006 goto out;
2007 }
2008
Steven Rostedta2f80712010-03-12 19:56:00 -05002009 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002010 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002011
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002012 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002013 if (buffer)
2014 ring_buffer_record_enable(buffer);
2015
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002016#ifdef CONFIG_TRACER_MAX_TRACE
2017 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002018 if (buffer)
2019 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002020#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002021
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002022 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002023
Steven Rostedt0f048702008-11-05 16:05:44 -05002024 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002025 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2026}
2027
2028static void tracing_start_tr(struct trace_array *tr)
2029{
2030 struct ring_buffer *buffer;
2031 unsigned long flags;
2032
2033 if (tracing_disabled)
2034 return;
2035
2036 /* If global, we need to also start the max tracer */
2037 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2038 return tracing_start();
2039
2040 raw_spin_lock_irqsave(&tr->start_lock, flags);
2041
2042 if (--tr->stop_count) {
2043 if (tr->stop_count < 0) {
2044 /* Someone screwed up their debugging */
2045 WARN_ON_ONCE(1);
2046 tr->stop_count = 0;
2047 }
2048 goto out;
2049 }
2050
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002051 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002052 if (buffer)
2053 ring_buffer_record_enable(buffer);
2054
2055 out:
2056 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002057}
2058
2059/**
2060 * tracing_stop - quick stop of the tracer
2061 *
2062 * Light weight way to stop tracing. Use in conjunction with
2063 * tracing_start.
2064 */
2065void tracing_stop(void)
2066{
2067 struct ring_buffer *buffer;
2068 unsigned long flags;
2069
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002070 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2071 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002072 goto out;
2073
Steven Rostedta2f80712010-03-12 19:56:00 -05002074 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002075 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002076
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002077 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002078 if (buffer)
2079 ring_buffer_record_disable(buffer);
2080
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002081#ifdef CONFIG_TRACER_MAX_TRACE
2082 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002083 if (buffer)
2084 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002085#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002086
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002087 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002088
Steven Rostedt0f048702008-11-05 16:05:44 -05002089 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002090 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2091}
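
/*
 * Pairing sketch: stop_count makes the calls nest, so a debug path
 * can bracket a noisy region (do_noisy_work() is hypothetical):
 *
 *	tracing_stop();
 *	do_noisy_work();
 *	tracing_start();
 */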
2092
2093static void tracing_stop_tr(struct trace_array *tr)
2094{
2095 struct ring_buffer *buffer;
2096 unsigned long flags;
2097
2098 /* If global, we need to also stop the max tracer */
2099 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2100 return tracing_stop();
2101
2102 raw_spin_lock_irqsave(&tr->start_lock, flags);
2103 if (tr->stop_count++)
2104 goto out;
2105
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002106 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002107 if (buffer)
2108 ring_buffer_record_disable(buffer);
2109
2110 out:
2111 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002112}
2113
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002114static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002115{
Carsten Emdea635cf02009-03-18 09:00:41 +01002116 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002117
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002118 /* treat recording of idle task as a success */
2119 if (!tsk->pid)
2120 return 1;
2121
2122 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002123 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002124
2125 /*
2126 * It's not the end of the world if we don't get
2127 * the lock, but we also don't want to spin
2128 * nor do we want to disable interrupts,
2129 * so if we miss here, then better luck next time.
2130 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002131 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002132 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002134 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002135 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002136 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002137
Carsten Emdea635cf02009-03-18 09:00:41 +01002138 /*
2139 * Check whether the cmdline buffer at idx has a pid
2140 * mapped. We are going to overwrite that entry so we
2141 * need to clear the map_pid_to_cmdline. Otherwise we
2142 * would read the new comm for the old pid.
2143 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002144 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002145 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002146 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002147
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002148 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2149 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002150
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002151 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002152 }
2153
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002154 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002155
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002156 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002157
2158 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002159}
2160
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002161static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002162{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002163 unsigned map;
2164
Steven Rostedt4ca530852009-03-16 19:20:15 -04002165 if (!pid) {
2166 strcpy(comm, "<idle>");
2167 return;
2168 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002169
Steven Rostedt74bf4072010-01-25 15:11:53 -05002170 if (WARN_ON_ONCE(pid < 0)) {
2171 strcpy(comm, "<XXX>");
2172 return;
2173 }
2174
Steven Rostedt4ca530852009-03-16 19:20:15 -04002175 if (pid > PID_MAX_DEFAULT) {
2176 strcpy(comm, "<...>");
2177 return;
2178 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002179
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002180 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002181 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302182 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002183 else
2184 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002185}
2186
2187void trace_find_cmdline(int pid, char comm[])
2188{
2189 preempt_disable();
2190 arch_spin_lock(&trace_cmdline_lock);
2191
2192 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002193
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002194 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002195 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002196}
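
/*
 * Usage sketch, as done by the output code: map a recorded pid back
 * to a comm, which falls back to "<...>" if the cmdline was never
 * saved:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */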
2197
Joel Fernandesd914ba32017-06-26 19:01:55 -07002198int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002199{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002200 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2201 return 0;
2202
2203 return tgid_map[pid];
2204}
2205
2206static int trace_save_tgid(struct task_struct *tsk)
2207{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002208 /* treat recording of idle task as a success */
2209 if (!tsk->pid)
2210 return 1;
2211
2212 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002213 return 0;
2214
2215 tgid_map[tsk->pid] = tsk->tgid;
2216 return 1;
2217}
2218
2219static bool tracing_record_taskinfo_skip(int flags)
2220{
2221 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2222 return true;
2223 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2224 return true;
2225 if (!__this_cpu_read(trace_taskinfo_save))
2226 return true;
2227 return false;
2228}
2229
2230/**
2231 * tracing_record_taskinfo - record the task info of a task
2232 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002233 * @task: task to record
2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002236 */
2237void tracing_record_taskinfo(struct task_struct *task, int flags)
2238{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002239 bool done;
2240
Joel Fernandesd914ba32017-06-26 19:01:55 -07002241 if (tracing_record_taskinfo_skip(flags))
2242 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002243
2244 /*
2245 * Record as much task information as possible. If some fail, continue
2246 * to try to record the others.
2247 */
2248 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2249 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2250
2251 /* If recording any information failed, retry again soon. */
2252 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002253 return;
2254
Joel Fernandesd914ba32017-06-26 19:01:55 -07002255 __this_cpu_write(trace_taskinfo_save, false);
2256}
2257
2258/**
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2260 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002261 * @prev: previous task during sched_switch
2262 * @next: next task during sched_switch
2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002265 */
2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags)
2268{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002269 bool done;
2270
Joel Fernandesd914ba32017-06-26 19:01:55 -07002271 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002272 return;
2273
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002274 /*
2275 * Record as much task information as possible. If some fail, continue
2276 * to try to record the others.
2277 */
2278 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2279 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2280 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2281 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002282
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002283 /* If recording any information failed, retry again soon. */
2284 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002285 return;
2286
2287 __this_cpu_write(trace_taskinfo_save, false);
2288}
2289
2290/* Helpers to record a specific task information */
2291void tracing_record_cmdline(struct task_struct *task)
2292{
2293 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2294}
2295
2296void tracing_record_tgid(struct task_struct *task)
2297{
2298 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299}
2300
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002301/*
2302 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2303 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2304 * simplifies those functions and keeps them in sync.
2305 */
2306enum print_line_t trace_handle_return(struct trace_seq *s)
2307{
2308 return trace_seq_has_overflowed(s) ?
2309 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2310}
2311EXPORT_SYMBOL_GPL(trace_handle_return);
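
/*
 * Typical use in an event's output callback (sketch; 'field' stands
 * for the event's decoded entry):
 *
 *	trace_seq_printf(s, "ip=%lx\n", field->ip);
 *	return trace_handle_return(s);
 */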
2312
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002313void
Cong Wang46710f32019-05-25 09:57:59 -07002314tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2315 unsigned long flags, int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002316{
2317 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002318
Steven Rostedt777e2082008-09-29 23:02:42 -04002319 entry->preempt_count = pc & 0xff;
2320 entry->pid = (tsk) ? tsk->pid : 0;
Cong Wang46710f32019-05-25 09:57:59 -07002321 entry->type = type;
Steven Rostedt777e2082008-09-29 23:02:42 -04002322 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002323#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002324 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002325#else
2326 TRACE_FLAG_IRQS_NOSUPPORT |
2327#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002328		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002329 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302330 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002331 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2332 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002333}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002334EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002335
Steven Rostedte77405a2009-09-02 14:17:06 -04002336struct ring_buffer_event *
2337trace_buffer_lock_reserve(struct ring_buffer *buffer,
2338 int type,
2339 unsigned long len,
2340 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002341{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002342 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002343}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002344
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002345DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2346DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2347static int trace_buffered_event_ref;
2348
2349/**
2350 * trace_buffered_event_enable - enable buffering events
2351 *
2352 * When events are being filtered, it is quicker to use a temporary
2353 * buffer to write the event data into if there's a likely chance
2354 * that it will not be committed. Discarding an event from the
2355 * ring buffer is not as fast as committing one, and is much
2356 * slower than copying the buffered data and committing it in one shot.
2357 *
2358 * When an event is to be filtered, allocate per cpu buffers to
2359 * write the event data into, and if the event is filtered and discarded
2360 * it is simply dropped; otherwise, the entire data is committed
2361 * in one shot.
2362 */
2363void trace_buffered_event_enable(void)
2364{
2365 struct ring_buffer_event *event;
2366 struct page *page;
2367 int cpu;
2368
2369 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2370
2371 if (trace_buffered_event_ref++)
2372 return;
2373
2374 for_each_tracing_cpu(cpu) {
2375 page = alloc_pages_node(cpu_to_node(cpu),
2376 GFP_KERNEL | __GFP_NORETRY, 0);
2377 if (!page)
2378 goto failed;
2379
2380 event = page_address(page);
2381 memset(event, 0, sizeof(*event));
2382
2383 per_cpu(trace_buffered_event, cpu) = event;
2384
2385 preempt_disable();
2386 if (cpu == smp_processor_id() &&
2387 this_cpu_read(trace_buffered_event) !=
2388 per_cpu(trace_buffered_event, cpu))
2389 WARN_ON_ONCE(1);
2390 preempt_enable();
2391 }
2392
2393 return;
2394 failed:
2395 trace_buffered_event_disable();
2396}
2397
2398static void enable_trace_buffered_event(void *data)
2399{
2400 /* Probably not needed, but do it anyway */
2401 smp_rmb();
2402 this_cpu_dec(trace_buffered_event_cnt);
2403}
2404
2405static void disable_trace_buffered_event(void *data)
2406{
2407 this_cpu_inc(trace_buffered_event_cnt);
2408}
2409
2410/**
2411 * trace_buffered_event_disable - disable buffering events
2412 *
2413 * When a filter is removed, it is faster to not use the buffered
2414 * events, and to commit directly into the ring buffer. Free up
2415 * the temp buffers when there are no more users. This requires
2416 * special synchronization with current events.
2417 */
2418void trace_buffered_event_disable(void)
2419{
2420 int cpu;
2421
2422 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2423
2424 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2425 return;
2426
2427 if (--trace_buffered_event_ref)
2428 return;
2429
2430 preempt_disable();
2431 /* For each CPU, set the buffer as used. */
2432 smp_call_function_many(tracing_buffer_mask,
2433 disable_trace_buffered_event, NULL, 1);
2434 preempt_enable();
2435
2436 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002437 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002438
2439 for_each_tracing_cpu(cpu) {
2440 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2441 per_cpu(trace_buffered_event, cpu) = NULL;
2442 }
2443 /*
2444 * Make sure trace_buffered_event is NULL before clearing
2445 * trace_buffered_event_cnt.
2446 */
2447 smp_wmb();
2448
2449 preempt_disable();
2450 /* Do the work on each cpu */
2451 smp_call_function_many(tracing_buffer_mask,
2452 enable_trace_buffered_event, NULL, 1);
2453 preempt_enable();
2454}
2455
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002456static struct ring_buffer *temp_buffer;
2457
Steven Rostedtef5580d2009-02-27 19:38:04 -05002458struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002459trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002460 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002461 int type, unsigned long len,
2462 unsigned long flags, int pc)
2463{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002464 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002465 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002466
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002467 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002468
Tom Zanussi00b41452018-01-15 20:51:39 -06002469 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002470 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2471 (entry = this_cpu_read(trace_buffered_event))) {
2472 /* Try to use the per cpu buffer first */
2473 val = this_cpu_inc_return(trace_buffered_event_cnt);
2474 if (val == 1) {
2475 trace_event_setup(entry, type, flags, pc);
2476 entry->array[0] = len;
2477 return entry;
2478 }
2479 this_cpu_dec(trace_buffered_event_cnt);
2480 }
2481
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002482 entry = __trace_buffer_lock_reserve(*current_rb,
2483 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002484 /*
2485	 * If tracing is off, but we have triggers enabled,
2486	 * we still need to look at the event data. Use the temp_buffer
2487	 * to store the trace event for the trigger to use. It's recursion
2488 * safe and will not be recorded anywhere.
2489 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002490 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002491 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002492 entry = __trace_buffer_lock_reserve(*current_rb,
2493 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002494 }
2495 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002496}
2497EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2498
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002499static DEFINE_SPINLOCK(tracepoint_iter_lock);
2500static DEFINE_MUTEX(tracepoint_printk_mutex);
2501
2502static void output_printk(struct trace_event_buffer *fbuffer)
2503{
2504 struct trace_event_call *event_call;
2505 struct trace_event *event;
2506 unsigned long flags;
2507 struct trace_iterator *iter = tracepoint_print_iter;
2508
2509 /* We should never get here if iter is NULL */
2510 if (WARN_ON_ONCE(!iter))
2511 return;
2512
2513 event_call = fbuffer->trace_file->event_call;
2514 if (!event_call || !event_call->event.funcs ||
2515 !event_call->event.funcs->trace)
2516 return;
2517
2518 event = &fbuffer->trace_file->event_call->event;
2519
2520 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2521 trace_seq_init(&iter->seq);
2522 iter->ent = fbuffer->entry;
2523 event_call->event.funcs->trace(iter, 0, event);
2524 trace_seq_putc(&iter->seq, 0);
2525 printk("%s", iter->seq.buffer);
2526
2527 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2528}
2529
2530int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2531 void __user *buffer, size_t *lenp,
2532 loff_t *ppos)
2533{
2534 int save_tracepoint_printk;
2535 int ret;
2536
2537 mutex_lock(&tracepoint_printk_mutex);
2538 save_tracepoint_printk = tracepoint_printk;
2539
2540 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2541
2542 /*
2543 * This will force exiting early, as tracepoint_printk
2544	 * is always zero when tracepoint_print_iter is not allocated
2545 */
2546 if (!tracepoint_print_iter)
2547 tracepoint_printk = 0;
2548
2549 if (save_tracepoint_printk == tracepoint_printk)
2550 goto out;
2551
2552 if (tracepoint_printk)
2553 static_key_enable(&tracepoint_printk_key.key);
2554 else
2555 static_key_disable(&tracepoint_printk_key.key);
2556
2557 out:
2558 mutex_unlock(&tracepoint_printk_mutex);
2559
2560 return ret;
2561}
2562
2563void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2564{
2565 if (static_key_false(&tracepoint_printk_key.key))
2566 output_printk(fbuffer);
2567
2568 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2569 fbuffer->event, fbuffer->entry,
2570 fbuffer->flags, fbuffer->pc);
2571}
2572EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2573
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002574/*
2575 * Skip 3:
2576 *
2577 * trace_buffer_unlock_commit_regs()
2578 * trace_event_buffer_commit()
2579 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302580 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002581# define STACK_SKIP 3
2582
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002583void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2584 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002585 struct ring_buffer_event *event,
2586 unsigned long flags, int pc,
2587 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002588{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002589 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002590
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002591 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002592	 * If regs is not set, skip the intermediate helper frames (STACK_SKIP).
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002593 * Note, we can still get here via blktrace, wakeup tracer
2594 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002595 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002596 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002597 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002598 ftrace_trace_userstack(buffer, flags, pc);
2599}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002600
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002601/*
2602 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2603 */
2604void
2605trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2606 struct ring_buffer_event *event)
2607{
2608 __buffer_unlock_commit(buffer, event);
2609}
2610
Chunyan Zhang478409d2016-11-21 15:57:18 +08002611static void
2612trace_process_export(struct trace_export *export,
2613 struct ring_buffer_event *event)
2614{
2615 struct trace_entry *entry;
2616 unsigned int size = 0;
2617
2618 entry = ring_buffer_event_data(event);
2619 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002620 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002621}
2622
2623static DEFINE_MUTEX(ftrace_export_lock);
2624
2625static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2626
2627static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2628
2629static inline void ftrace_exports_enable(void)
2630{
2631 static_branch_enable(&ftrace_exports_enabled);
2632}
2633
2634static inline void ftrace_exports_disable(void)
2635{
2636 static_branch_disable(&ftrace_exports_enabled);
2637}
2638
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002639static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002640{
2641 struct trace_export *export;
2642
2643 preempt_disable_notrace();
2644
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002645 export = rcu_dereference_raw_check(ftrace_exports_list);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002646 while (export) {
2647 trace_process_export(export, event);
Joel Fernandes (Google)0a5b99f2019-07-11 16:45:41 -04002648 export = rcu_dereference_raw_check(export->next);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002649 }
2650
2651 preempt_enable_notrace();
2652}
2653
2654static inline void
2655add_trace_export(struct trace_export **list, struct trace_export *export)
2656{
2657 rcu_assign_pointer(export->next, *list);
2658 /*
2659 * We are entering export into the list but another
2660 * CPU might be walking that list. We need to make sure
2661 * the export->next pointer is valid before another CPU sees
2662 * the export pointer inserted into the list.
2663 */
2664 rcu_assign_pointer(*list, export);
2665}
2666
2667static inline int
2668rm_trace_export(struct trace_export **list, struct trace_export *export)
2669{
2670 struct trace_export **p;
2671
2672 for (p = list; *p != NULL; p = &(*p)->next)
2673 if (*p == export)
2674 break;
2675
2676 if (*p != export)
2677 return -1;
2678
2679 rcu_assign_pointer(*p, (*p)->next);
2680
2681 return 0;
2682}
2683
2684static inline void
2685add_ftrace_export(struct trace_export **list, struct trace_export *export)
2686{
2687 if (*list == NULL)
2688 ftrace_exports_enable();
2689
2690 add_trace_export(list, export);
2691}
2692
2693static inline int
2694rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2695{
2696 int ret;
2697
2698 ret = rm_trace_export(list, export);
2699 if (*list == NULL)
2700 ftrace_exports_disable();
2701
2702 return ret;
2703}
2704
2705int register_ftrace_export(struct trace_export *export)
2706{
2707 if (WARN_ON_ONCE(!export->write))
2708 return -1;
2709
2710 mutex_lock(&ftrace_export_lock);
2711
2712 add_ftrace_export(&ftrace_exports_list, export);
2713
2714 mutex_unlock(&ftrace_export_lock);
2715
2716 return 0;
2717}
2718EXPORT_SYMBOL_GPL(register_ftrace_export);
2719
2720int unregister_ftrace_export(struct trace_export *export)
2721{
2722 int ret;
2723
2724 mutex_lock(&ftrace_export_lock);
2725
2726 ret = rm_ftrace_export(&ftrace_exports_list, export);
2727
2728 mutex_unlock(&ftrace_export_lock);
2729
2730 return ret;
2731}
2732EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2733
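/*
 * A minimal sketch (not part of this file) of how a module might use
 * the register_ftrace_export()/unregister_ftrace_export() API above.
 * Everything below — the callback name, the module boilerplate — is an
 * illustrative assumption; only struct trace_export and the two
 * registration functions come from this file. Guarded by #if 0 so it
 * is never compiled.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/trace.h>

static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/*
	 * Forward the raw trace entry (a struct trace_entry followed
	 * by its payload) to an out-of-band channel. Runs with
	 * preemption disabled, so it must not sleep.
	 */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	/* From now on, each function trace event is also handed to
	 * example_export_write(). */
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}

module_init(example_export_init);
module_exit(example_export_exit);
MODULE_LICENSE("GPL");
#endif
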
Ingo Molnare309b412008-05-12 21:20:51 +02002734void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002735trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002736 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2737 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002738{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002739 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002740 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002741 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002742 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002743
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002744 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2745 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002746 if (!event)
2747 return;
2748 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002749 entry->ip = ip;
2750 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002751
Chunyan Zhang478409d2016-11-21 15:57:18 +08002752 if (!call_filter_check_discard(call, entry, buffer, event)) {
2753 if (static_branch_unlikely(&ftrace_exports_enabled))
2754 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002755 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002756 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002757}
2758
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002759#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002760
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002761/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2762#define FTRACE_KSTACK_NESTING 4
2763
2764#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2765
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002766struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002767 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002768};
2769
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002770
2771struct ftrace_stacks {
2772 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2773};
2774
2775static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002776static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2777
Steven Rostedte77405a2009-09-02 14:17:06 -04002778static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002779 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002780 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002781{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002782 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002783 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002784 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002785 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002786 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002787 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002788
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002789 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002790	 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002791	 * If regs is set, then these functions will not be in the way.
2792 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002793#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002794 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002795 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002796#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002797
2798 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002799 * Since events can happen in NMIs there's no safe way to
2800 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2801 * or NMI comes in, it will just have to use the default
2802 * FTRACE_STACK_SIZE.
2803 */
2804 preempt_disable_notrace();
2805
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002806 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2807
2808 /* This should never happen. If it does, yell once and skip */
2809	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2810 goto out;
2811
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002812 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002813 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2814 * interrupt will either see the value pre increment or post
2815 * increment. If the interrupt happens pre increment it will have
2816 * restored the counter when it returns. We just need a barrier to
2817 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002818 */
2819 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002820
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002821 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002822 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002823
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002824 if (regs) {
2825 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2826 size, skip);
2827 } else {
2828 nr_entries = stack_trace_save(fstack->calls, size, skip);
2829 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002830
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002831 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002832 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2833 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002834 if (!event)
2835 goto out;
2836 entry = ring_buffer_event_data(event);
2837
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002838 memcpy(&entry->caller, fstack->calls, size);
2839 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002840
Tom Zanussif306cc82013-10-24 08:34:17 -05002841 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002842 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002843
2844 out:
2845 /* Again, don't let gcc optimize things here */
2846 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002847 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002848 preempt_enable_notrace();
2849
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002850}
2851
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002852static inline void ftrace_trace_stack(struct trace_array *tr,
2853 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002854 unsigned long flags,
2855 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002856{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002857 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002858 return;
2859
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002860 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002861}
2862
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002863void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2864 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002865{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002866 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2867
2868 if (rcu_is_watching()) {
2869 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2870 return;
2871 }
2872
2873 /*
2874 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2875 * but if the above rcu_is_watching() failed, then the NMI
2876 * triggered someplace critical, and rcu_irq_enter() should
2877 * not be called from NMI.
2878 */
2879 if (unlikely(in_nmi()))
2880 return;
2881
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002882 rcu_irq_enter_irqson();
2883 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2884 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002885}
2886
Steven Rostedt03889382009-12-11 09:48:22 -05002887/**
2888 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002889 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002890 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002891void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002892{
2893 unsigned long flags;
2894
2895 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002896 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002897
2898 local_save_flags(flags);
2899
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002900#ifndef CONFIG_UNWINDER_ORC
2901 /* Skip 1 to skip this function. */
2902 skip++;
2903#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002904 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2905 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002906}
Nikolay Borisovda387e52018-10-17 09:51:43 +03002907EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05002908
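/*
 * Usage sketch for trace_dump_stack() (illustrative, not taken from
 * this file): record a kernel backtrace into the ring buffer at a
 * point of interest instead of spamming dmesg via dump_stack():
 *
 *	if (suspicious_condition)
 *		trace_dump_stack(0);
 *
 * A positive @skip hides that many helper frames above the caller.
 */
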
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002909#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01002910static DEFINE_PER_CPU(int, user_stack_count);
2911
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002912static void
Steven Rostedte77405a2009-09-02 14:17:06 -04002913ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002914{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002915 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002916 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002917 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02002918
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002919 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002920 return;
2921
Steven Rostedtb6345872010-03-12 20:03:30 -05002922 /*
2923	 * NMIs can not handle page faults, even with fixups.
2924	 * Saving the user stack can (and often does) fault.
2925 */
2926 if (unlikely(in_nmi()))
2927 return;
2928
Steven Rostedt91e86e52010-11-10 12:56:12 +01002929 /*
2930 * prevent recursion, since the user stack tracing may
2931 * trigger other kernel events.
2932 */
2933 preempt_disable();
2934 if (__this_cpu_read(user_stack_count))
2935 goto out;
2936
2937 __this_cpu_inc(user_stack_count);
2938
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002939 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2940 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002941 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002942 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002943 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002944
Steven Rostedt48659d32009-09-11 11:36:23 -04002945 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002946 memset(&entry->caller, 0, sizeof(entry->caller));
2947
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002948 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05002949 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002950 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002951
Li Zefan1dbd1952010-12-09 15:47:56 +08002952 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002953 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002954 out:
2955 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002956}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002957#else /* CONFIG_USER_STACKTRACE_SUPPORT */
2958static void ftrace_trace_userstack(struct ring_buffer *buffer,
2959 unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002960{
Török Edwin02b67512008-11-22 13:28:47 +02002961}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02002962#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02002963
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002964#endif /* CONFIG_STACKTRACE */
2965
Steven Rostedt07d777f2011-09-22 14:01:55 -04002966/* created for use with alloc_percpu */
2967struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002968	int nesting;	/* current trace_printk() nesting depth on this CPU */
2969	char buffer[4][TRACE_BUF_SIZE];	/* one buffer per context: task, softirq, irq, NMI */
Steven Rostedt07d777f2011-09-22 14:01:55 -04002970};
2971
2972static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002973
2974/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002975	 * This allows for lockless recording. If we're nested too deeply, then
2976 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002977 */
2978static char *get_trace_buf(void)
2979{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002980 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002981
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002982 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002983 return NULL;
2984
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002985 buffer->nesting++;
2986
2987 /* Interrupts must see nesting incremented before we use the buffer */
2988 barrier();
2989 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002990}
2991
2992static void put_trace_buf(void)
2993{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002994 /* Don't let the decrement of nesting leak before this */
2995 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002996 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002997}
2998
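/*
 * Intended call pattern for the two helpers above — a sketch mirroring
 * what trace_vbprintk()/__trace_array_vprintk() below actually do. The
 * caller must disable preemption around the get/put pair so the per-CPU
 * buffer and nesting counter stay on one CPU:
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format at most TRACE_BUF_SIZE bytes into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */
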
2999static int alloc_percpu_trace_buffer(void)
3000{
3001 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003002
3003 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003004 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3005 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003006
3007 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003008 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003009}
3010
Steven Rostedt81698832012-10-11 10:15:05 -04003011static int buffers_allocated;
3012
Steven Rostedt07d777f2011-09-22 14:01:55 -04003013void trace_printk_init_buffers(void)
3014{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003015 if (buffers_allocated)
3016 return;
3017
3018 if (alloc_percpu_trace_buffer())
3019 return;
3020
Steven Rostedt2184db42014-05-28 13:14:40 -04003021 /* trace_printk() is for debug use only. Don't use it in production. */
3022
Joe Perchesa395d6a2016-03-22 14:28:09 -07003023 pr_warn("\n");
3024 pr_warn("**********************************************************\n");
3025 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3026 pr_warn("** **\n");
3027 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3028 pr_warn("** **\n");
3029 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3030 pr_warn("** unsafe for production use. **\n");
3031 pr_warn("** **\n");
3032 pr_warn("** If you see this message and you are not debugging **\n");
3033 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3034 pr_warn("** **\n");
3035 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3036 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003037
Steven Rostedtb382ede62012-10-10 21:44:34 -04003038 /* Expand the buffers to set size */
3039 tracing_update_buffers();
3040
Steven Rostedt07d777f2011-09-22 14:01:55 -04003041 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003042
3043 /*
3044 * trace_printk_init_buffers() can be called by modules.
3045 * If that happens, then we need to start cmdline recording
3046	 * directly here. If global_trace.trace_buffer.buffer is
3047	 * already allocated, then this was called by module code.
3048 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003049 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003050 tracing_start_cmdline_record();
3051}
Divya Indif45d1222019-03-20 11:28:51 -07003052EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003053
3054void trace_printk_start_comm(void)
3055{
3056 /* Start tracing comms if trace printk is set */
3057 if (!buffers_allocated)
3058 return;
3059 tracing_start_cmdline_record();
3060}
3061
3062static void trace_printk_start_stop_comm(int enabled)
3063{
3064 if (!buffers_allocated)
3065 return;
3066
3067 if (enabled)
3068 tracing_start_cmdline_record();
3069 else
3070 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003071}
3072
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003073/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003074 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003075 * @ip: The address of the caller
3076 * @fmt: The string format to write to the buffer
3077 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003078 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003080{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003081 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003082 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04003083 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003084 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003085 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003086 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003087 char *tbuffer;
3088 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003089
3090 if (unlikely(tracing_selftest_running || tracing_disabled))
3091 return 0;
3092
3093 /* Don't pollute graph traces with trace_vprintk internals */
3094 pause_graph_tracing();
3095
3096 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003097 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003098
Steven Rostedt07d777f2011-09-22 14:01:55 -04003099 tbuffer = get_trace_buf();
3100 if (!tbuffer) {
3101 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003102 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003103 }
3104
3105 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3106
3107 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003108 goto out;
3109
Steven Rostedt07d777f2011-09-22 14:01:55 -04003110 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003111 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003112 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003113 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3114 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003115 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003116 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003117 entry = ring_buffer_event_data(event);
3118 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003119 entry->fmt = fmt;
3120
Steven Rostedt07d777f2011-09-22 14:01:55 -04003121 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003122 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003123 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003124 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003125 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003126
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003127out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003128 put_trace_buf();
3129
3130out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003131 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003132 unpause_graph_tracing();
3133
3134 return len;
3135}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003136EXPORT_SYMBOL_GPL(trace_vbprintk);
3137
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003138__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003139static int
3140__trace_array_vprintk(struct ring_buffer *buffer,
3141 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003142{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003143 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003144 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003145 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003146 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003147 unsigned long flags;
3148 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003149
3150 if (tracing_disabled || tracing_selftest_running)
3151 return 0;
3152
Steven Rostedt07d777f2011-09-22 14:01:55 -04003153 /* Don't pollute graph traces with trace_vprintk internals */
3154 pause_graph_tracing();
3155
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003156 pc = preempt_count();
3157 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003158
3160 tbuffer = get_trace_buf();
3161 if (!tbuffer) {
3162 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003163 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003164 }
3165
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003166 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003167
Steven Rostedt07d777f2011-09-22 14:01:55 -04003168 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003169 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003170 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3171 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003172 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003173 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003174 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003175 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003176
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003177 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003178 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003179 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003180 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003181 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003182
3183out:
3184 put_trace_buf();
3185
3186out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003187 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003188 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003189
3190 return len;
3191}
Steven Rostedt659372d2009-09-03 19:11:07 -04003192
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003193__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003194int trace_array_vprintk(struct trace_array *tr,
3195 unsigned long ip, const char *fmt, va_list args)
3196{
3197 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3198}
3199
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003200__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003201int trace_array_printk(struct trace_array *tr,
3202 unsigned long ip, const char *fmt, ...)
3203{
3204 int ret;
3205 va_list ap;
3206
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003207 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003208 return 0;
3209
3210 va_start(ap, fmt);
3211 ret = trace_array_vprintk(tr, ip, fmt, ap);
3212 va_end(ap);
3213 return ret;
3214}
Divya Indif45d1222019-03-20 11:28:51 -07003215EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003216
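/*
 * Illustrative use of trace_array_printk() (the function name in the
 * example is hypothetical; the tr handle is assumed to come from a
 * trace instance created elsewhere):
 *
 *	void example_hit(struct trace_array *tr, int cpu, u64 delta)
 *	{
 *		trace_array_printk(tr, _THIS_IP_,
 *				   "cpu %d stalled for %llu ns\n",
 *				   cpu, delta);
 *	}
 *
 * Unlike trace_printk(), this writes to the given instance's buffer
 * rather than the global one. Note the guard above: it returns 0
 * unless the global PRINTK trace flag is set.
 */
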
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003217__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003218int trace_array_printk_buf(struct ring_buffer *buffer,
3219 unsigned long ip, const char *fmt, ...)
3220{
3221 int ret;
3222 va_list ap;
3223
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003224 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003225 return 0;
3226
3227 va_start(ap, fmt);
3228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3229 va_end(ap);
3230 return ret;
3231}
3232
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003233__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003234int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3235{
Steven Rostedta813a152009-10-09 01:41:35 -04003236 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003237}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238EXPORT_SYMBOL_GPL(trace_vprintk);
3239
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003240static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003241{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003242 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3243
Steven Rostedt5a90f572008-09-03 17:42:51 -04003244 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003245 if (buf_iter)
3246 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003247}
3248
Ingo Molnare309b412008-05-12 21:20:51 +02003249static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003250peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3251 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003252{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003253 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003254 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003255
Steven Rostedtd7690412008-10-01 00:29:53 -04003256 if (buf_iter)
3257 event = ring_buffer_iter_peek(buf_iter, ts);
3258 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003259 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003260 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003261
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003262 if (event) {
3263 iter->ent_size = ring_buffer_event_length(event);
3264 return ring_buffer_event_data(event);
3265 }
3266 iter->ent_size = 0;
3267 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003268}
Steven Rostedtd7690412008-10-01 00:29:53 -04003269
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003270static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003271__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3272 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003273{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003274 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003275 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003276 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003277 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003278 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003279 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003280 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003281 int cpu;
3282
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003283 /*
3284	 * If we are in a per_cpu trace file, don't bother iterating over
3285	 * all CPUs; peek at that one directly.
3286 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003287 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003288 if (ring_buffer_empty_cpu(buffer, cpu_file))
3289 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003290 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003291 if (ent_cpu)
3292 *ent_cpu = cpu_file;
3293
3294 return ent;
3295 }
3296
Steven Rostedtab464282008-05-12 21:21:00 +02003297 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003298
3299 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003300 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003301
Steven Rostedtbc21b472010-03-31 19:49:26 -04003302 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003303
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003304 /*
3305 * Pick the entry with the smallest timestamp:
3306 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003307 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003308 next = ent;
3309 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003310 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003311 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003312 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003313 }
3314 }
3315
Steven Rostedt12b5da32012-03-27 10:43:28 -04003316 iter->ent_size = next_size;
3317
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003318 if (ent_cpu)
3319 *ent_cpu = next_cpu;
3320
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003321 if (ent_ts)
3322 *ent_ts = next_ts;
3323
Steven Rostedtbc21b472010-03-31 19:49:26 -04003324 if (missing_events)
3325 *missing_events = next_lost;
3326
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327 return next;
3328}
3329
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003330/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003331struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3332 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003333{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003334 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003335}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003336
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003337/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003338void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003339{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003340 iter->ent = __find_next_entry(iter, &iter->cpu,
3341 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003342
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003343 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003344 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003345
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003346 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003347}
3348
Ingo Molnare309b412008-05-12 21:20:51 +02003349static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003350{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003351 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003352 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003353}
3354
Ingo Molnare309b412008-05-12 21:20:51 +02003355static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003356{
3357 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003358 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003359 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003360
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003361 WARN_ON_ONCE(iter->leftover);
3362
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003363 (*pos)++;
3364
3365 /* can't go backwards */
3366 if (iter->idx > i)
3367 return NULL;
3368
3369 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003370 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003371 else
3372 ent = iter;
3373
3374 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003375 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003376
3377 iter->pos = *pos;
3378
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003379 return ent;
3380}
3381
Jason Wessel955b61e2010-08-05 09:22:23 -05003382void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003383{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003384 struct ring_buffer_event *event;
3385 struct ring_buffer_iter *buf_iter;
3386 unsigned long entries = 0;
3387 u64 ts;
3388
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003389 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003390
Steven Rostedt6d158a82012-06-27 20:46:14 -04003391 buf_iter = trace_buffer_iter(iter, cpu);
3392 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003393 return;
3394
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003395 ring_buffer_iter_reset(buf_iter);
3396
3397 /*
3398	 * With the max latency tracers, a reset may never have taken
3399	 * place on a CPU. This is evident when the timestamp is
3400	 * before the start of the buffer.
3401 */
3402 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003403 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003404 break;
3405 entries++;
3406 ring_buffer_read(buf_iter, NULL);
3407 }
3408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003409 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003410}
3411
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003412/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003413	 * The current tracer is copied to avoid global locking
3414	 * all around.
3415 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003416static void *s_start(struct seq_file *m, loff_t *pos)
3417{
3418 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003419 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003420 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003421 void *p = NULL;
3422 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003423 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003425 /*
3426 * copy the tracer to avoid using a global lock all around.
3427 * iter->trace is a copy of current_trace, the pointer to the
3428 * name may be used instead of a strcmp(), as iter->trace->name
3429 * will point to the same string as current_trace->name.
3430 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003431 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003432 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3433 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003434 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003436#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003437 if (iter->snapshot && iter->trace->use_max_tr)
3438 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003439#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003440
3441 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003442 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444 if (*pos != iter->pos) {
3445 iter->ent = NULL;
3446 iter->cpu = 0;
3447 iter->idx = -1;
3448
Steven Rostedtae3b5092013-01-23 15:22:59 -05003449 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003450 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003451 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003452 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003453 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003454
Lai Jiangshanac91d852010-03-02 17:54:50 +08003455 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003456 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3457 ;
3458
3459 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003460 /*
3461 * If we overflowed the seq_file before, then we want
3462 * to just reuse the trace_seq buffer again.
3463 */
3464 if (iter->leftover)
3465 p = iter;
3466 else {
3467 l = *pos - 1;
3468 p = s_next(m, p, &l);
3469 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003470 }
3471
Lai Jiangshan4f535962009-05-18 19:35:34 +08003472 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003473 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003474 return p;
3475}
3476
3477static void s_stop(struct seq_file *m, void *p)
3478{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003479 struct trace_iterator *iter = m->private;
3480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003481#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003482 if (iter->snapshot && iter->trace->use_max_tr)
3483 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003484#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003485
3486 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003487 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003488
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003489 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003490 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491}
3492
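/*
 * For orientation: s_start()/s_next()/s_stop() above are seq_file
 * iteration callbacks. They get wired together with a show method in
 * a seq_operations table further down in this file, along these lines
 * (the table and s_show names are assumed here for illustration):
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 */
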
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003493static void
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003494get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3495 unsigned long *entries, int cpu)
3496{
3497 unsigned long count;
3498
3499 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3500 /*
3501 * If this buffer has skipped entries, then we hold all
3502 * entries for the trace and we need to ignore the
3503 * ones before the time stamp.
3504 */
3505 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3506 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3507 /* total is the same as the entries */
3508 *total = count;
3509 } else
3510 *total = count +
3511 ring_buffer_overrun_cpu(buf->buffer, cpu);
3512 *entries = count;
3513}
3514
3515static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003516get_total_entries(struct trace_buffer *buf,
3517 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003518{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003519 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003520 int cpu;
3521
3522 *total = 0;
3523 *entries = 0;
3524
3525 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003526 get_total_entries_cpu(buf, &t, &e, cpu);
3527 *total += t;
3528 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003529 }
3530}
3531
Douglas Andersonecffc8a2019-03-19 10:12:05 -07003532unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3533{
3534 unsigned long total, entries;
3535
3536 if (!tr)
3537 tr = &global_trace;
3538
3539 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3540
3541 return entries;
3542}
3543
3544unsigned long trace_total_entries(struct trace_array *tr)
3545{
3546 unsigned long total, entries;
3547
3548 if (!tr)
3549 tr = &global_trace;
3550
3551 get_total_entries(&tr->trace_buffer, &total, &entries);
3552
3553 return entries;
3554}
3555
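/*
 * Quick usage sketch for the two helpers above (illustrative only):
 * passing NULL falls back to the global trace instance.
 *
 *	unsigned long seen = trace_total_entries(NULL);
 *	unsigned long on_cpu0 = trace_total_entries_cpu(NULL, 0);
 *
 * Both return the entry count; the overrun-inclusive total computed
 * by get_total_entries() is discarded by these wrappers.
 */
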
Ingo Molnare309b412008-05-12 21:20:51 +02003556static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003557{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003558 seq_puts(m, "# _------=> CPU# \n"
3559 "# / _-----=> irqs-off \n"
3560 "# | / _----=> need-resched \n"
3561 "# || / _---=> hardirq/softirq \n"
3562 "# ||| / _--=> preempt-depth \n"
3563 "# |||| / delay \n"
3564 "# cmd pid ||||| time | caller \n"
3565 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003566}
3567
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003568static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003569{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003570 unsigned long total;
3571 unsigned long entries;
3572
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003573 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003574 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3575 entries, total, num_online_cpus());
3576 seq_puts(m, "#\n");
3577}
3578
Joel Fernandes441dae82017-06-25 22:38:43 -07003579static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3580 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003581{
Joel Fernandes441dae82017-06-25 22:38:43 -07003582 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3583
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003584 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003585
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003586 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3587 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003588}
3589
Joel Fernandes441dae82017-06-25 22:38:43 -07003590static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3591 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003592{
Joel Fernandes441dae82017-06-25 22:38:43 -07003593 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003594 const char *space = " ";
3595 int prec = tgid ? 10 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07003596
Quentin Perret9e738212019-02-14 15:29:50 +00003597 print_event_info(buf, m);
3598
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003599 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3600 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3601 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3602 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3603 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3604 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3605 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003606}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003607
Jiri Olsa62b915f2010-04-02 19:01:22 +02003608void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003609print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3610{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003611 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003612 struct trace_buffer *buf = iter->trace_buffer;
3613 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003614 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003615 unsigned long entries;
3616 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003617 const char *name = "preemption";
3618
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003619 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003620
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003621 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003622
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003623 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003624 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003625 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003626 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003627 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003628 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003629 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003630 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003631 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003632 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003633#if defined(CONFIG_PREEMPT_NONE)
3634 "server",
3635#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3636 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003637#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003638 "preempt",
3639#else
3640 "unknown",
3641#endif
3642 /* These are reserved for later use */
3643 0, 0, 0, 0);
3644#ifdef CONFIG_SMP
3645 seq_printf(m, " #P:%d)\n", num_online_cpus());
3646#else
3647 seq_puts(m, ")\n");
3648#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003649 seq_puts(m, "# -----------------\n");
3650 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003651 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003652 data->comm, data->pid,
3653 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003654 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003655 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003656
3657 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003658 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003659 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3660 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003661 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003662 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3663 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003664 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003665 }
3666
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003667 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003668}
3669
Steven Rostedta3097202008-11-07 22:36:02 -05003670static void test_cpu_buff_start(struct trace_iterator *iter)
3671{
3672 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003673 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003674
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003675 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003676 return;
3677
3678 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3679 return;
3680
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003681 if (cpumask_available(iter->started) &&
3682 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003683 return;
3684
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003685 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003686 return;
3687
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003688 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003689 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003690
3691 /* Don't print started cpu buffer for the first entry of the trace */
3692 if (iter->idx > 1)
3693 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3694 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003695}
3696
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003697static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003698{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003699 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003700 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003701 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003702 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003703 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003704
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003705 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003706
Steven Rostedta3097202008-11-07 22:36:02 -05003707 test_cpu_buff_start(iter);
3708
Steven Rostedtf633cef2008-12-23 23:24:13 -05003709 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003710
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003711 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003712 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3713 trace_print_lat_context(iter);
3714 else
3715 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003716 }
3717
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003718 if (trace_seq_has_overflowed(s))
3719 return TRACE_TYPE_PARTIAL_LINE;
3720
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003721 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003722 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003723
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003724 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003725
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003726 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003727}
3728
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003729static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003730{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003731 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003732 struct trace_seq *s = &iter->seq;
3733 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003734 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003735
3736 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003737
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003738 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003739 trace_seq_printf(s, "%d %d %llu ",
3740 entry->pid, iter->cpu, iter->ts);
3741
3742 if (trace_seq_has_overflowed(s))
3743 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003744
Steven Rostedtf633cef2008-12-23 23:24:13 -05003745 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003746 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003747 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003748
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003749 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003750
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003751 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003752}
3753
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003754static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003755{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003756 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003757 struct trace_seq *s = &iter->seq;
3758 unsigned char newline = '\n';
3759 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003760 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003761
3762 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003763
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003764 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003765 SEQ_PUT_HEX_FIELD(s, entry->pid);
3766 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3767 SEQ_PUT_HEX_FIELD(s, iter->ts);
3768 if (trace_seq_has_overflowed(s))
3769 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003770 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003771
Steven Rostedtf633cef2008-12-23 23:24:13 -05003772 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003773 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003774 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003775 if (ret != TRACE_TYPE_HANDLED)
3776 return ret;
3777 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003778
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003779 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003780
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003781 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003782}
3783
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003784static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003785{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003786 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003787 struct trace_seq *s = &iter->seq;
3788 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003789 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003790
3791 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003792
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003793 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003794 SEQ_PUT_FIELD(s, entry->pid);
3795 SEQ_PUT_FIELD(s, iter->cpu);
3796 SEQ_PUT_FIELD(s, iter->ts);
3797 if (trace_seq_has_overflowed(s))
3798 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003799 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003800
Steven Rostedtf633cef2008-12-23 23:24:13 -05003801 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003802 return event ? event->funcs->binary(iter, 0, event) :
3803 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003804}
3805
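/*
 * Return 1 if there is nothing left to read, 0 otherwise. When the
 * iterator is bound to a single CPU (cpu_file != RING_BUFFER_ALL_CPUS)
 * only that CPU's buffer is checked; otherwise every tracing CPU must
 * be empty.
 */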
Jiri Olsa62b915f2010-04-02 19:01:22 +02003806int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003807{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003808 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003809 int cpu;
3810
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003811 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003812 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003813 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003814 buf_iter = trace_buffer_iter(iter, cpu);
3815 if (buf_iter) {
3816 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003817 return 0;
3818 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003819 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003820 return 0;
3821 }
3822 return 1;
3823 }
3824
Steven Rostedtab464282008-05-12 21:21:00 +02003825 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003826 buf_iter = trace_buffer_iter(iter, cpu);
3827 if (buf_iter) {
3828 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003829 return 0;
3830 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003831 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003832 return 0;
3833 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003834 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003835
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003836 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003837}
3838
Lai Jiangshan4f535962009-05-18 19:35:34 +08003839/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003840enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003841{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003842 struct trace_array *tr = iter->tr;
3843 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003844 enum print_line_t ret;
3845
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003846 if (iter->lost_events) {
3847 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3848 iter->cpu, iter->lost_events);
3849 if (trace_seq_has_overflowed(&iter->seq))
3850 return TRACE_TYPE_PARTIAL_LINE;
3851 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003852
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003853 if (iter->trace && iter->trace->print_line) {
3854 ret = iter->trace->print_line(iter);
3855 if (ret != TRACE_TYPE_UNHANDLED)
3856 return ret;
3857 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003858
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003859 if (iter->ent->type == TRACE_BPUTS &&
3860 trace_flags & TRACE_ITER_PRINTK &&
3861 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3862 return trace_print_bputs_msg_only(iter);
3863
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003864 if (iter->ent->type == TRACE_BPRINT &&
3865 trace_flags & TRACE_ITER_PRINTK &&
3866 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003867 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003868
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003869 if (iter->ent->type == TRACE_PRINT &&
3870 trace_flags & TRACE_ITER_PRINTK &&
3871 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003872 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003873
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003874 if (trace_flags & TRACE_ITER_BIN)
3875 return print_bin_fmt(iter);
3876
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003877 if (trace_flags & TRACE_ITER_HEX)
3878 return print_hex_fmt(iter);
3879
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003880 if (trace_flags & TRACE_ITER_RAW)
3881 return print_raw_fmt(iter);
3882
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003883 return print_trace_fmt(iter);
3884}
3885
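/*
 * Print the header for a latency trace: the full trace header if the
 * iterator is in latency format, then the short latency help line
 * unless the "verbose" option is set. Nothing is printed while the
 * buffers are empty.
 */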
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003886void trace_latency_header(struct seq_file *m)
3887{
3888 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003889 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003890
3891 /* print nothing if the buffers are empty */
3892 if (trace_empty(iter))
3893 return;
3894
3895 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3896 print_trace_header(m, iter);
3897
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003898 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003899 print_lat_help_header(m);
3900}
3901
Jiri Olsa62b915f2010-04-02 19:01:22 +02003902void trace_default_header(struct seq_file *m)
3903{
3904 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003905 struct trace_array *tr = iter->tr;
3906 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003907
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003908 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3909 return;
3910
Jiri Olsa62b915f2010-04-02 19:01:22 +02003911 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3912 /* print nothing if the buffers are empty */
3913 if (trace_empty(iter))
3914 return;
3915 print_trace_header(m, iter);
3916 if (!(trace_flags & TRACE_ITER_VERBOSE))
3917 print_lat_help_header(m);
3918 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003919 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3920 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003921 print_func_help_header_irq(iter->trace_buffer,
3922 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003923 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003924 print_func_help_header(iter->trace_buffer, m,
3925 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003926 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003927 }
3928}
3929
Steven Rostedte0a413f2011-09-29 21:26:16 -04003930static void test_ftrace_alive(struct seq_file *m)
3931{
3932 if (!ftrace_is_dead())
3933 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003934 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3935 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003936}
3937
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003938#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003939static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003940{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003941 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3942 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3943 "# Takes a snapshot of the main buffer.\n"
3944 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3945 "# (Doesn't have to be '2' works with any number that\n"
3946 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003947}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003948
3949static void show_snapshot_percpu_help(struct seq_file *m)
3950{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003951 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003952#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003953 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3954 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003955#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003956 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3957 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003958#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003959 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3960 "# (Doesn't have to be '2' works with any number that\n"
3961 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003962}
3963
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003964static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3965{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003966 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003967 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003968 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003969 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003970
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003971 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003972 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3973 show_snapshot_main_help(m);
3974 else
3975 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003976}
3977#else
3978/* Should never be called */
3979static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3980#endif
3981
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003982static int s_show(struct seq_file *m, void *v)
3983{
3984 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003985 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003986
3987 if (iter->ent == NULL) {
3988 if (iter->tr) {
3989 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3990 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003991 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003992 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003993 if (iter->snapshot && trace_empty(iter))
3994 print_snapshot_help(m, iter);
3995 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003996 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003997 else
3998 trace_default_header(m);
3999
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004000 } else if (iter->leftover) {
4001 /*
4002 * If we filled the seq_file buffer earlier, we
4003 * want to just show it now.
4004 */
4005 ret = trace_print_seq(m, &iter->seq);
4006
4007 /* ret should this time be zero, but you never know */
4008 iter->leftover = ret;
4009
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004010 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004011 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004012 ret = trace_print_seq(m, &iter->seq);
4013 /*
4014 * If we overflow the seq_file buffer, then it will
4015 * ask us for this data again at start up.
4016 * Use that instead.
4017 * ret is 0 if seq_file write succeeded.
4018 * -1 otherwise.
4019 */
4020 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004021 }
4022
4023 return 0;
4024}
4025
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004026/*
4027 * Should be used after trace_array_get(), trace_types_lock
4028 * ensures that i_cdev was already initialized.
4029 */
4030static inline int tracing_get_cpu(struct inode *inode)
4031{
4032 if (inode->i_cdev) /* See trace_create_cpu_file() */
4033 return (long)inode->i_cdev - 1;
4034 return RING_BUFFER_ALL_CPUS;
4035}
4036
James Morris88e9d342009-09-22 16:43:43 -07004037static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004038 .start = s_start,
4039 .next = s_next,
4040 .stop = s_stop,
4041 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004042};
4043
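/*
 * Set up the iterator behind a read of the "trace" file: take a
 * private copy of the current tracer (so a concurrent tracer switch
 * cannot pull it out from under the reader), pick the main or the
 * max/snapshot buffer, stop tracing unless the "snapshot" file is
 * being opened, and prepare a ring buffer iterator for the selected
 * CPU (or all of them). tracing_release() undoes all of this.
 */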
Ingo Molnare309b412008-05-12 21:20:51 +02004044static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004045__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004046{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004047 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004048 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004049 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004050
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004051 if (tracing_disabled)
4052 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004053
Jiri Olsa50e18b92012-04-25 10:23:39 +02004054 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004055 if (!iter)
4056 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004057
Gil Fruchter72917232015-06-09 10:32:35 +03004058 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004059 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004060 if (!iter->buffer_iter)
4061 goto release;
4062
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004063 /*
4064 * We make a copy of the current tracer to avoid concurrent
4065 * changes on it while we are reading.
4066 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004067 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004068 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004069 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004070 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004071
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004072 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004073
Li Zefan79f55992009-06-15 14:58:26 +08004074 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004075 goto fail;
4076
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004077 iter->tr = tr;
4078
4079#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004080 /* Currently only the top directory has a snapshot */
4081 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004082 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004083 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004084#endif
4085 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004086 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004087 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004088 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004089 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004090
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004091 /* Notify the tracer early; before we stop tracing. */
4092 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004093 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004094
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004095 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004096 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004097 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4098
David Sharp8be07092012-11-13 12:18:22 -08004099 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004100 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004101 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4102
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004103 /* stop the trace while dumping if we are not opening "snapshot" */
4104 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004105 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004106
Steven Rostedtae3b5092013-01-23 15:22:59 -05004107 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004108 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004109 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004110 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4111 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004112 }
4113 ring_buffer_read_prepare_sync();
4114 for_each_tracing_cpu(cpu) {
4115 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004116 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004117 }
4118 } else {
4119 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004120 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004121 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4122 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004123 ring_buffer_read_prepare_sync();
4124 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004125 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004126 }
4127
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004128 mutex_unlock(&trace_types_lock);
4129
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004130 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004131
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004132 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004133 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004134 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004135 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004136release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004137 seq_release_private(inode, file);
4138 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004139}
4140
4141int tracing_open_generic(struct inode *inode, struct file *filp)
4142{
Steven Rostedt60a11772008-05-12 21:20:44 +02004143 if (tracing_disabled)
4144 return -ENODEV;
4145
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004146 filp->private_data = inode->i_private;
4147 return 0;
4148}
4149
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004150bool tracing_is_disabled(void)
4151{
4152 return tracing_disabled;
4153}
4154
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004155/*
4156 * Open and update trace_array ref count.
4157 * Must have the current trace_array passed to it.
4158 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04004159static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004160{
4161 struct trace_array *tr = inode->i_private;
4162
4163 if (tracing_disabled)
4164 return -ENODEV;
4165
4166 if (trace_array_get(tr) < 0)
4167 return -ENODEV;
4168
4169 filp->private_data = inode->i_private;
4170
4171 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004172}
4173
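/*
 * Tear down what __tracing_open() set up: finish the per-cpu ring
 * buffer iterators, give the tracer its close() callback, restart
 * tracing if the open had stopped it, and drop the trace_array
 * reference. Writers never allocated an iterator, so for them only
 * the reference is dropped.
 */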
Hannes Eder4fd27352009-02-10 19:44:12 +01004174static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004175{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004176 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004177 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004178 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004179 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004180
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004181 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004182 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004183 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004184 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004185
Oleg Nesterov6484c712013-07-23 17:26:10 +02004186 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004187 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004188 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004189
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004190 for_each_tracing_cpu(cpu) {
4191 if (iter->buffer_iter[cpu])
4192 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4193 }
4194
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004195 if (iter->trace && iter->trace->close)
4196 iter->trace->close(iter);
4197
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004198 if (!iter->snapshot)
4199 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004200 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004201
4202 __trace_array_put(tr);
4203
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004204 mutex_unlock(&trace_types_lock);
4205
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004206 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004207 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004208 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004209 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004210 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004211
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212 return 0;
4213}
4214
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004215static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4216{
4217 struct trace_array *tr = inode->i_private;
4218
4219 trace_array_put(tr);
4220 return 0;
4221}
4222
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004223static int tracing_single_release_tr(struct inode *inode, struct file *file)
4224{
4225 struct trace_array *tr = inode->i_private;
4226
4227 trace_array_put(tr);
4228
4229 return single_release(inode, file);
4230}
4231
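/*
 * Open the "trace" file. Opening for write with O_TRUNC clears the
 * backing buffer (all CPUs, or just the CPU behind a per_cpu file),
 * which is why "echo > trace" empties the trace. Readers get a full
 * seq_file iterator from __tracing_open().
 */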
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232static int tracing_open(struct inode *inode, struct file *file)
4233{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004234 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004235 struct trace_iterator *iter;
4236 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004237
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004238 if (trace_array_get(tr) < 0)
4239 return -ENODEV;
4240
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004241 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004242 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4243 int cpu = tracing_get_cpu(inode);
Bo Yan8dd33bc2017-09-18 10:03:35 -07004244 struct trace_buffer *trace_buf = &tr->trace_buffer;
4245
4246#ifdef CONFIG_TRACER_MAX_TRACE
4247 if (tr->current_trace->print_max)
4248 trace_buf = &tr->max_buffer;
4249#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004250
4251 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004252 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004253 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004254 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004255 }
4256
4257 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004258 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004259 if (IS_ERR(iter))
4260 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004261 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004262 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4263 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004264
4265 if (ret < 0)
4266 trace_array_put(tr);
4267
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004268 return ret;
4269}
4270
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004271/*
4272 * Some tracers are not suitable for instance buffers.
4273 * A tracer is always available for the global array (toplevel)
4274 * or if it explicitly states that it is.
4275 */
4276static bool
4277trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4278{
4279 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4280}
4281
4282/* Find the next tracer that this trace array may use */
4283static struct tracer *
4284get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4285{
4286 while (t && !trace_ok_for_array(t, tr))
4287 t = t->next;
4288
4289 return t;
4290}
4291
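/*
 * seq_file iterator over the registered tracers, backing the
 * "available_tracers" file. Only tracers usable by this trace array
 * are shown (see trace_ok_for_array() above).
 */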
Ingo Molnare309b412008-05-12 21:20:51 +02004292static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004293t_next(struct seq_file *m, void *v, loff_t *pos)
4294{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004295 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004296 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004297
4298 (*pos)++;
4299
4300 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004301 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004302
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004303 return t;
4304}
4305
4306static void *t_start(struct seq_file *m, loff_t *pos)
4307{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004308 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004309 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004310 loff_t l = 0;
4311
4312 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004313
4314 t = get_tracer_for_array(tr, trace_types);
4315 for (; t && l < *pos; t = t_next(m, t, &l))
4316 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004317
4318 return t;
4319}
4320
4321static void t_stop(struct seq_file *m, void *p)
4322{
4323 mutex_unlock(&trace_types_lock);
4324}
4325
4326static int t_show(struct seq_file *m, void *v)
4327{
4328 struct tracer *t = v;
4329
4330 if (!t)
4331 return 0;
4332
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004333 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004334 if (t->next)
4335 seq_putc(m, ' ');
4336 else
4337 seq_putc(m, '\n');
4338
4339 return 0;
4340}
4341
James Morris88e9d342009-09-22 16:43:43 -07004342static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004343 .start = t_start,
4344 .next = t_next,
4345 .stop = t_stop,
4346 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004347};
4348
4349static int show_traces_open(struct inode *inode, struct file *file)
4350{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004351 struct trace_array *tr = inode->i_private;
4352 struct seq_file *m;
4353 int ret;
4354
Steven Rostedt60a11772008-05-12 21:20:44 +02004355 if (tracing_disabled)
4356 return -ENODEV;
4357
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004358 ret = seq_open(file, &show_traces_seq_ops);
4359 if (ret)
4360 return ret;
4361
4362 m = file->private_data;
4363 m->private = tr;
4364
4365 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004366}
4367
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004368static ssize_t
4369tracing_write_stub(struct file *filp, const char __user *ubuf,
4370 size_t count, loff_t *ppos)
4371{
4372 return count;
4373}
4374
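/*
 * Seeking is only meaningful for readers of the seq_file; a file
 * opened write-only simply has its position pinned at zero.
 */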
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004375loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004376{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004377 int ret;
4378
Slava Pestov364829b2010-11-24 15:13:16 -08004379 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004380 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004381 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004382 file->f_pos = ret = 0;
4383
4384 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004385}
4386
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004387static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004388 .open = tracing_open,
4389 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004390 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004391 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004392 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004393};
4394
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004395static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004396 .open = show_traces_open,
4397 .read = seq_read,
4398 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004399 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004400};
4401
4402static ssize_t
4403tracing_cpumask_read(struct file *filp, char __user *ubuf,
4404 size_t count, loff_t *ppos)
4405{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004406 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004407 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004408 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004409
Changbin Du90e406f2017-11-30 11:39:43 +08004410 len = snprintf(NULL, 0, "%*pb\n",
4411 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4412 mask_str = kmalloc(len, GFP_KERNEL);
4413 if (!mask_str)
4414 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004415
Changbin Du90e406f2017-11-30 11:39:43 +08004416 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004417 cpumask_pr_args(tr->tracing_cpumask));
4418 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004419 count = -EINVAL;
4420 goto out_err;
4421 }
Changbin Du90e406f2017-11-30 11:39:43 +08004422 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004423
4424out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004425 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004426
4427 return count;
4428}
4429
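/*
 * Update the set of CPUs being traced. For every CPU whose mask bit
 * flips, the per-cpu disabled counter is raised or dropped and ring
 * buffer recording for that CPU is toggled to match, under
 * tr->max_lock with interrupts off so the update appears atomic.
 */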
4430static ssize_t
4431tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4432 size_t count, loff_t *ppos)
4433{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004434 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304435 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004436 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304437
4438 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4439 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004440
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304441 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004442 if (err)
4443 goto err_unlock;
4444
Steven Rostedta5e25882008-12-02 15:34:05 -05004445 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004446 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004447 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004448 /*
4449 * Increase/decrease the disabled counter if we are
4450 * about to flip a bit in the cpumask:
4451 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004452 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304453 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004454 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4455 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004456 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004457 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304458 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004459 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4460 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004461 }
4462 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004463 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004464 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004465
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004466 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304467 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004468
Ingo Molnarc7078de2008-05-12 21:20:52 +02004469 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004470
4471err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004472 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004473
4474 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004475}
4476
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004477static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004478 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004479 .read = tracing_cpumask_read,
4480 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004481 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004482 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004483};
4484
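/*
 * List every core trace option plus the current tracer's private
 * flags, one per line, with disabled options prefixed by "no", the
 * same spelling that writes to this file accept back.
 */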
Li Zefanfdb372e2009-12-08 11:15:59 +08004485static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004486{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004487 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004488 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004489 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004490 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004491
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004492 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004493 tracer_flags = tr->current_trace->flags->val;
4494 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004495
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004496 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004497 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004498 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004499 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004500 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004501 }
4502
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004503 for (i = 0; trace_opts[i].name; i++) {
4504 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004505 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004506 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004507 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004508 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004509 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004510
Li Zefanfdb372e2009-12-08 11:15:59 +08004511 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004512}
4513
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004514static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004515 struct tracer_flags *tracer_flags,
4516 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004517{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004518 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004519 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004520
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004521 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004522 if (ret)
4523 return ret;
4524
4525 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004526 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004527 else
Zhaolei77708412009-08-07 18:53:21 +08004528 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004529 return 0;
4530}
4531
Li Zefan8d18eaa2009-12-08 11:17:06 +08004532/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004533static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004534{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004535 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004536 struct tracer_flags *tracer_flags = trace->flags;
4537 struct tracer_opt *opts = NULL;
4538 int i;
4539
4540 for (i = 0; tracer_flags->opts[i].name; i++) {
4541 opts = &tracer_flags->opts[i];
4542
4543 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004544 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004545 }
4546
4547 return -EINVAL;
4548}
4549
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004550/* Some tracers require overwrite to stay enabled */
4551int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4552{
4553 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4554 return -1;
4555
4556 return 0;
4557}
4558
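/*
 * Set or clear one TRACE_ITER_* flag on a trace array. The current
 * tracer may veto the change through its flag_changed() callback,
 * and several flags carry side effects that are applied here:
 * cmdline/tgid recording, fork following, ring buffer overwrite mode
 * and printk routing.
 */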
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004559int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004560{
4561 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004562 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004563 return 0;
4564
4565 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004566 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004567 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004568 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004569
4570 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004571 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004572 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004573 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004574
4575 if (mask == TRACE_ITER_RECORD_CMD)
4576 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004577
Joel Fernandesd914ba32017-06-26 19:01:55 -07004578 if (mask == TRACE_ITER_RECORD_TGID) {
4579 if (!tgid_map)
Kees Cook6396bb22018-06-12 14:03:40 -07004580 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4581 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004582 GFP_KERNEL);
4583 if (!tgid_map) {
4584 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4585 return -ENOMEM;
4586 }
4587
4588 trace_event_enable_tgid_record(enabled);
4589 }
4590
Steven Rostedtc37775d2016-04-13 16:59:18 -04004591 if (mask == TRACE_ITER_EVENT_FORK)
4592 trace_event_follow_fork(tr, enabled);
4593
Namhyung Kim1e104862017-04-17 11:44:28 +09004594 if (mask == TRACE_ITER_FUNC_FORK)
4595 ftrace_pid_follow_fork(tr, enabled);
4596
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004597 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004598 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004599#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004600 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004601#endif
4602 }
Steven Rostedt81698832012-10-11 10:15:05 -04004603
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004604 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004605 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004606 trace_printk_control(enabled);
4607 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004608
4609 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004610}
4611
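/*
 * Parse one option string from the "trace_options" file or the boot
 * command line. A leading "no" clears the flag it names; core options
 * are matched first and anything unknown is offered to the current
 * tracer as a tracer-specific option. For example:
 *
 *	echo raw > trace_options	# set TRACE_ITER_RAW
 *	echo noraw > trace_options	# clear it again
 */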
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004612static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004613{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004614 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004615 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004616 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004617 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004618 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004619
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004620 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004621
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004622 len = str_has_prefix(cmp, "no");
4623 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004624 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004625
4626 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004627
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004628 mutex_lock(&trace_types_lock);
4629
Yisheng Xie591a0332018-05-17 16:36:03 +08004630 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004631 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004632 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004633 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004634 else
4635 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004636
4637 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004638
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004639 /*
4640 * If the first trailing whitespace is replaced with '\0' by strstrip,
4641 * turn it back into a space.
4642 */
4643 if (orig_len > strlen(option))
4644 option[strlen(option)] = ' ';
4645
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004646 return ret;
4647}
4648
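/*
 * Apply the comma-separated list saved from the trace_options= boot
 * parameter to the global trace array. strsep() punches '\0' holes
 * into the buffer, so the commas are restored afterwards and the
 * string stays reusable.
 */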
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004649static void __init apply_trace_boot_options(void)
4650{
4651 char *buf = trace_boot_options_buf;
4652 char *option;
4653
4654 while (true) {
4655 option = strsep(&buf, ",");
4656
4657 if (!option)
4658 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004659
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004660 if (*option)
4661 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004662
4663 /* Put back the comma to allow this to be called again */
4664 if (buf)
4665 *(buf - 1) = ',';
4666 }
4667}
4668
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004669static ssize_t
4670tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4671 size_t cnt, loff_t *ppos)
4672{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004673 struct seq_file *m = filp->private_data;
4674 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004675 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004676 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004677
4678 if (cnt >= sizeof(buf))
4679 return -EINVAL;
4680
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004681 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004682 return -EFAULT;
4683
Steven Rostedta8dd2172013-01-09 20:54:17 -05004684 buf[cnt] = 0;
4685
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004686 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004687 if (ret < 0)
4688 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004689
Jiri Olsacf8517c2009-10-23 19:36:16 -04004690 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004691
4692 return cnt;
4693}
4694
Li Zefanfdb372e2009-12-08 11:15:59 +08004695static int tracing_trace_options_open(struct inode *inode, struct file *file)
4696{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004697 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004698 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004699
Li Zefanfdb372e2009-12-08 11:15:59 +08004700 if (tracing_disabled)
4701 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004702
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004703 if (trace_array_get(tr) < 0)
4704 return -ENODEV;
4705
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004706 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4707 if (ret < 0)
4708 trace_array_put(tr);
4709
4710 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004711}
4712
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004713static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004714 .open = tracing_trace_options_open,
4715 .read = seq_read,
4716 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004717 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004718 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004719};
4720
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004721static const char readme_msg[] =
4722 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004723 "# echo 0 > tracing_on : quick way to disable tracing\n"
4724 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4725 " Important files:\n"
4726 " trace\t\t\t- The static contents of the buffer\n"
4727 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4728 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4729 " current_tracer\t- function and latency tracers\n"
4730 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05004731 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004732 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4733 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4734 " trace_clock\t\t-change the clock used to order events\n"
4735 " local: Per cpu clock but may not be synced across CPUs\n"
4736 " global: Synced across CPUs but slows tracing down.\n"
4737 " counter: Not a clock, but just an increment\n"
4738 " uptime: Jiffy counter from time of boot\n"
4739 " perf: Same clock that perf events use\n"
4740#ifdef CONFIG_X86_64
4741 " x86-tsc: TSC cycle counter\n"
4742#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004743 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4744 " delta: Delta difference against a buffer-wide timestamp\n"
4745 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004746 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004747 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004748 " tracing_cpumask\t- Limit which CPUs to trace\n"
4749 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4750 "\t\t\t Remove sub-buffer with rmdir\n"
4751 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08004752 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004753 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004754 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004755#ifdef CONFIG_DYNAMIC_FTRACE
4756 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004757 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4758 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004759 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004760 "\t modules: Can select a group via module\n"
4761 "\t Format: :mod:<module-name>\n"
4762 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4763 "\t triggers: a command to perform when function is hit\n"
4764 "\t Format: <function>:<trigger>[:count]\n"
4765 "\t trigger: traceon, traceoff\n"
4766 "\t\t enable_event:<system>:<event>\n"
4767 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004768#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004769 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004770#endif
4771#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004772 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004773#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004774 "\t\t dump\n"
4775 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004776 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4777 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4778 "\t The first one will disable tracing every time do_fault is hit\n"
4779 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4780 "\t The first time do trap is hit and it disables tracing, the\n"
4781 "\t counter will decrement to 2. If tracing is already disabled,\n"
4782 "\t the counter will not decrement. It only decrements when the\n"
4783 "\t trigger did work\n"
4784 "\t To remove trigger without count:\n"
4785 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4786 "\t To remove trigger with a count:\n"
4787 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004788 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004789 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4790 "\t modules: Can select a group via module command :mod:\n"
4791 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004792#endif /* CONFIG_DYNAMIC_FTRACE */
4793#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004794 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4795 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004796#endif
4797#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4798 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004799 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004800 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4801#endif
4802#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004803 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4804 "\t\t\t snapshot buffer. Read the contents for more\n"
4805 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004806#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004807#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004808 " stack_trace\t\t- Shows the max stack trace when active\n"
4809 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004810 "\t\t\t Write into this file to reset the max size (trigger a\n"
4811 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004812#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004813 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4814 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004815#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004816#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004817#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004818 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004819 "\t\t\t Write into this file to define/undefine new trace events.\n"
4820#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004821#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09004822 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004823 "\t\t\t Write into this file to define/undefine new trace events.\n"
4824#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004825#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09004826 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004827 "\t\t\t Write into this file to define/undefine new trace events.\n"
4828#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004829#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004830 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09004831 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4832 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004833#ifdef CONFIG_HIST_TRIGGERS
4834 "\t s:[synthetic/]<event> <field> [<field>]\n"
4835#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004836 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004837#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004838 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304839 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004840#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004841#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05304842 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004843#endif
4844 "\t args: <name>=fetcharg[:type]\n"
4845 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004846#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09004847 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004848#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09004849 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004850#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09004851 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09004852 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09004853 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09004854 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004855#ifdef CONFIG_HIST_TRIGGERS
4856 "\t field: <stype> <name>;\n"
4857 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4858 "\t [unsigned] char/int/long\n"
4859#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004860#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004861 " events/\t\t- Directory containing all trace event subsystems:\n"
4862 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4863 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004864 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4865 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004866 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004867 " events/<system>/<event>/\t- Directory containing control files for\n"
4868 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004869 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4870 " filter\t\t- If set, only events passing filter are traced\n"
4871 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004872 "\t Format: <trigger>[:count][if <filter>]\n"
4873 "\t trigger: traceon, traceoff\n"
4874 "\t enable_event:<system>:<event>\n"
4875 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004876#ifdef CONFIG_HIST_TRIGGERS
4877 "\t enable_hist:<system>:<event>\n"
4878 "\t disable_hist:<system>:<event>\n"
4879#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004880#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004881 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004882#endif
4883#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004884 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004885#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004886#ifdef CONFIG_HIST_TRIGGERS
4887 "\t\t hist (see below)\n"
4888#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004889 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4890 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4891 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4892 "\t events/block/block_unplug/trigger\n"
4893 "\t The first disables tracing every time block_unplug is hit.\n"
4894 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4895 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4896 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4897 "\t Like function triggers, the counter is only decremented if it\n"
4898 "\t enabled or disabled tracing.\n"
4899 "\t To remove a trigger without a count:\n"
4900 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4901 "\t To remove a trigger with a count:\n"
4902 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4903 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004904#ifdef CONFIG_HIST_TRIGGERS
4905 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004906 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004907 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004908 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004909 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004910 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004911 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004912 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004913 "\t [if <filter>]\n\n"
4914 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004915 "\t table using the key(s) and value(s) named, and the value of a\n"
4916 "\t sum called 'hitcount' is incremented. Keys and values\n"
4917 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004918 "\t can be any field, or the special string 'stacktrace'.\n"
4919 "\t Compound keys consisting of up to two fields can be specified\n"
4920 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4921 "\t fields. Sort keys consisting of up to two fields can be\n"
4922 "\t specified using the 'sort' keyword. The sort direction can\n"
4923 "\t be modified by appending '.descending' or '.ascending' to a\n"
4924 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004925 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4926 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4927 "\t its histogram data will be shared with other triggers of the\n"
4928 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004929 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004930 "\t table in its entirety to stdout. If there are multiple hist\n"
4931 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004932 "\t trigger in the output. The table displayed for a named\n"
4933 "\t trigger will be the same as any other instance having the\n"
4934 "\t same name. The default format used to display a given field\n"
4935 "\t can be modified by appending any of the following modifiers\n"
4936 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004937 "\t .hex display a number as a hex value\n"
4938 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004939 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004940 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06004941 "\t .syscall display a syscall id as a syscall name\n"
4942 "\t .log2 display log2 value rather than raw number\n"
4943 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004944 "\t The 'pause' parameter can be used to pause an existing hist\n"
4945 "\t trigger or to start a hist trigger but not log any events\n"
4946 "\t until told to do so. 'continue' can be used to start or\n"
4947 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004948 "\t The 'clear' parameter will clear the contents of a running\n"
4949 "\t hist trigger and leave its current paused/active state\n"
4950 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004951 "\t The enable_hist and disable_hist triggers can be used to\n"
4952 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00004953 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004954 "\t the enable_event and disable_event triggers.\n\n"
4955 "\t Hist trigger handlers and actions are executed whenever a\n"
4956 "\t a histogram entry is added or updated. They take the form:\n\n"
4957 "\t <handler>.<action>\n\n"
4958 "\t The available handlers are:\n\n"
4959 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06004960 "\t onmax(var) - invoke if var exceeds current max\n"
4961 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004962 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06004963 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004964 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06004965#ifdef CONFIG_TRACER_SNAPSHOT
4966 "\t snapshot() - snapshot the trace buffer\n"
4967#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004968#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004969;
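
/*
 * Editor's sketch of concrete invocations matching the help text above;
 * not part of the original file. Event, group, and symbol names are
 * illustrative and assume tracefs is mounted at /sys/kernel/tracing.
 *
 *	# define a kprobe event on do_sys_open, then remove it
 *	echo 'p:mygroup/myopen do_sys_open' >> kprobe_events
 *	echo '-:mygroup/myopen' >> kprobe_events
 *
 *	# attach a hist trigger keyed on call_site, summing bytes_req
 *	echo 'hist:keys=call_site.sym:values=bytes_req' > \
 *		events/kmem/kmalloc/trigger
 *	cat events/kmem/kmalloc/hist
 *
 *	# remove a counting trigger; the count must be given as :0
 *	echo '!traceoff:0' > events/block/block_unplug/trigger
 */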
4970
4971static ssize_t
4972tracing_readme_read(struct file *filp, char __user *ubuf,
4973 size_t cnt, loff_t *ppos)
4974{
4975 return simple_read_from_buffer(ubuf, cnt, ppos,
4976 readme_msg, strlen(readme_msg));
4977}
4978
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004979static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004980 .open = tracing_open_generic,
4981 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004982 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004983};
4984
Michael Sartain99c621d2017-07-05 22:07:15 -06004985static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4986{
4987 int *ptr = v;
4988
4989 if (*pos || m->count)
4990 ptr++;
4991
4992 (*pos)++;
4993
4994 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4995 if (trace_find_tgid(*ptr))
4996 return ptr;
4997 }
4998
4999 return NULL;
5000}
5001
5002static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5003{
5004 void *v;
5005 loff_t l = 0;
5006
5007 if (!tgid_map)
5008 return NULL;
5009
5010 v = &tgid_map[0];
5011 while (l <= *pos) {
5012 v = saved_tgids_next(m, v, &l);
5013 if (!v)
5014 return NULL;
5015 }
5016
5017 return v;
5018}
5019
5020static void saved_tgids_stop(struct seq_file *m, void *v)
5021{
5022}
5023
5024static int saved_tgids_show(struct seq_file *m, void *v)
5025{
5026 int pid = (int *)v - tgid_map;
5027
5028 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5029 return 0;
5030}
5031
5032static const struct seq_operations tracing_saved_tgids_seq_ops = {
5033 .start = saved_tgids_start,
5034 .stop = saved_tgids_stop,
5035 .next = saved_tgids_next,
5036 .show = saved_tgids_show,
5037};
5038
5039static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5040{
5041 if (tracing_disabled)
5042 return -ENODEV;
5043
5044 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5045}
5046
5047
5048static const struct file_operations tracing_saved_tgids_fops = {
5049 .open = tracing_saved_tgids_open,
5050 .read = seq_read,
5051 .llseek = seq_lseek,
5052 .release = seq_release,
5053};
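
/*
 * Editor's note: reading saved_tgids yields one "<pid> <tgid>" pair per
 * line. The map is only populated while the record-tgid trace option is
 * set (otherwise tgid_map is NULL and saved_tgids_start() returns
 * nothing), so a plain "cat saved_tgids" may legitimately be empty.
 */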
5054
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005055static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005056{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005057 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005058
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005059 if (*pos || m->count)
5060 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005061
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005062 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005063
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005064 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5065 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005066 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005067 continue;
5068
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005069 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005070 }
5071
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005072 return NULL;
5073}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005074
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005075static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5076{
5077 void *v;
5078 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005079
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005080 preempt_disable();
5081 arch_spin_lock(&trace_cmdline_lock);
5082
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005083 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005084 while (l <= *pos) {
5085 v = saved_cmdlines_next(m, v, &l);
5086 if (!v)
5087 return NULL;
5088 }
5089
5090 return v;
5091}
5092
5093static void saved_cmdlines_stop(struct seq_file *m, void *v)
5094{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005095 arch_spin_unlock(&trace_cmdline_lock);
5096 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005097}
5098
5099static int saved_cmdlines_show(struct seq_file *m, void *v)
5100{
5101 char buf[TASK_COMM_LEN];
5102 unsigned int *pid = v;
5103
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005104 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005105 seq_printf(m, "%d %s\n", *pid, buf);
5106 return 0;
5107}
5108
5109static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5110 .start = saved_cmdlines_start,
5111 .next = saved_cmdlines_next,
5112 .stop = saved_cmdlines_stop,
5113 .show = saved_cmdlines_show,
5114};
5115
5116static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5117{
5118 if (tracing_disabled)
5119 return -ENODEV;
5120
5121 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005122}
5123
5124static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005125 .open = tracing_saved_cmdlines_open,
5126 .read = seq_read,
5127 .llseek = seq_lseek,
5128 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005129};
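
/*
 * Editor's note: saved_cmdlines pairs recorded PIDs with the last comm
 * seen for them, one "<pid> <comm>" per line, e.g. (values illustrative):
 *
 *	$ cat saved_cmdlines
 *	1 systemd
 *	23 kworker/1:1
 */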
5130
5131static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005132tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5133 size_t cnt, loff_t *ppos)
5134{
5135 char buf[64];
5136 int r;
5137
5138 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005139 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005140 arch_spin_unlock(&trace_cmdline_lock);
5141
5142 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5143}
5144
5145static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5146{
5147 kfree(s->saved_cmdlines);
5148 kfree(s->map_cmdline_to_pid);
5149 kfree(s);
5150}
5151
5152static int tracing_resize_saved_cmdlines(unsigned int val)
5153{
5154 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5155
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005156 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005157 if (!s)
5158 return -ENOMEM;
5159
5160 if (allocate_cmdlines_buffer(val, s) < 0) {
5161 kfree(s);
5162 return -ENOMEM;
5163 }
5164
5165 arch_spin_lock(&trace_cmdline_lock);
5166 savedcmd_temp = savedcmd;
5167 savedcmd = s;
5168 arch_spin_unlock(&trace_cmdline_lock);
5169 free_saved_cmdlines_buffer(savedcmd_temp);
5170
5171 return 0;
5172}
5173
5174static ssize_t
5175tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5176 size_t cnt, loff_t *ppos)
5177{
5178 unsigned long val;
5179 int ret;
5180
5181 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5182 if (ret)
5183 return ret;
5184
5185 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5186 if (!val || val > PID_MAX_DEFAULT)
5187 return -EINVAL;
5188
5189 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5190 if (ret < 0)
5191 return ret;
5192
5193 *ppos += cnt;
5194
5195 return cnt;
5196}
5197
5198static const struct file_operations tracing_saved_cmdlines_size_fops = {
5199 .open = tracing_open_generic,
5200 .read = tracing_saved_cmdlines_size_read,
5201 .write = tracing_saved_cmdlines_size_write,
5202};
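
/*
 * Usage sketch (editor's addition): the comm cache can be resized at
 * runtime, to anywhere within [1, PID_MAX_DEFAULT]:
 *
 *	$ echo 1024 > saved_cmdlines_size
 *	$ cat saved_cmdlines_size
 *	1024
 */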
5203
Jeremy Linton681bec02017-05-31 16:56:53 -05005204#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005205static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005206update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005207{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005208 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005209 if (ptr->tail.next) {
5210 ptr = ptr->tail.next;
5211 /* Set ptr to the next real item (skip head) */
5212 ptr++;
5213 } else
5214 return NULL;
5215 }
5216 return ptr;
5217}
5218
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005219static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005220{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005221 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005222
5223 /*
5224 * Paranoid! If ptr points to end, we don't want to increment past it.
5225 * This really should never happen.
5226 */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005227 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005228 if (WARN_ON_ONCE(!ptr))
5229 return NULL;
5230
5231 ptr++;
5232
5233 (*pos)++;
5234
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005235 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005236
5237 return ptr;
5238}
5239
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005240static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005241{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005242 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005243 loff_t l = 0;
5244
Jeremy Linton1793ed92017-05-31 16:56:46 -05005245 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005246
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005247 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005248 if (v)
5249 v++;
5250
5251 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005252 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005253 }
5254
5255 return v;
5256}
5257
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005258static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005259{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005260 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005261}
5262
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005263static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005264{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005265 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005266
5267 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005268 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005269 ptr->map.system);
5270
5271 return 0;
5272}
5273
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005274static const struct seq_operations tracing_eval_map_seq_ops = {
5275 .start = eval_map_start,
5276 .next = eval_map_next,
5277 .stop = eval_map_stop,
5278 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005279};
5280
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005281static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005282{
5283 if (tracing_disabled)
5284 return -ENODEV;
5285
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005286 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005287}
5288
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005289static const struct file_operations tracing_eval_map_fops = {
5290 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005291 .read = seq_read,
5292 .llseek = seq_lseek,
5293 .release = seq_release,
5294};
5295
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005296static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005297trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005298{
5299 /* Return tail of array given the head */
5300 return ptr + ptr->head.length + 1;
5301}
5302
5303static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005304trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005305 int len)
5306{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005307 struct trace_eval_map **stop;
5308 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005309 union trace_eval_map_item *map_array;
5310 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005311
5312 stop = start + len;
5313
5314 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005315 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005316 * where the head holds the module and length of array, and the
5317 * tail holds a pointer to the next list.
5318 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005319 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005320 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005321 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005322 return;
5323 }
5324
Jeremy Linton1793ed92017-05-31 16:56:46 -05005325 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005326
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005327 if (!trace_eval_maps)
5328 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005329 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005330 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005331 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005332 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005333 if (!ptr->tail.next)
5334 break;
5335 ptr = ptr->tail.next;
5336
5337 }
5338 ptr->tail.next = map_array;
5339 }
5340 map_array->head.mod = mod;
5341 map_array->head.length = len;
5342 map_array++;
5343
5344 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5345 map_array->map = **map;
5346 map_array++;
5347 }
5348 memset(map_array, 0, sizeof(*map_array));
5349
Jeremy Linton1793ed92017-05-31 16:56:46 -05005350 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005351}
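
/*
 * Editor's illustration of the layout built above: for a module that
 * contributes three maps, map_array points at
 *
 *	[ head(mod, 3) ][ map0 ][ map1 ][ map2 ][ tail (zeroed) ]
 *
 * so trace_eval_jmp_to_tail(head) == head + 3 + 1 lands on the tail,
 * whose ->tail.next later chains in the next module's array.
 */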
5352
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005353static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005354{
Jeremy Linton681bec02017-05-31 16:56:53 -05005355 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005356 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005357}
5358
Jeremy Linton681bec02017-05-31 16:56:53 -05005359#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005360static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5361static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005362 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005363#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005364
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005365static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005366 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005367{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005368 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005369
5370 if (len <= 0)
5371 return;
5372
5373 map = start;
5374
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005375 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005376
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005377 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005378}
5379
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005380static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005381tracing_set_trace_read(struct file *filp, char __user *ubuf,
5382 size_t cnt, loff_t *ppos)
5383{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005384 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005385 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005386 int r;
5387
5388 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005389 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005390 mutex_unlock(&trace_types_lock);
5391
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005392 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005393}
5394
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005395int tracer_init(struct tracer *t, struct trace_array *tr)
5396{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005397 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005398 return t->init(tr);
5399}
5400
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005401static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005402{
5403 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005404
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005405 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005406 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005407}
5408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005409#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005410/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005411static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5412 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005413{
5414 int cpu, ret = 0;
5415
5416 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5417 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005418 ret = ring_buffer_resize(trace_buf->buffer,
5419 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005420 if (ret < 0)
5421 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005422 per_cpu_ptr(trace_buf->data, cpu)->entries =
5423 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005424 }
5425 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005426 ret = ring_buffer_resize(trace_buf->buffer,
5427 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005428 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005429 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5430 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005431 }
5432
5433 return ret;
5434}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005435#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005436
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005437static int __tracing_resize_ring_buffer(struct trace_array *tr,
5438 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005439{
5440 int ret;
5441
5442 /*
5443 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04005444 * we use the size that was given, and we can forget about
5445 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005446 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005447 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005448
Steven Rostedtb382ede62012-10-10 21:44:34 -04005449 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005450 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005451 return 0;
5452
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005453 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005454 if (ret < 0)
5455 return ret;
5456
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005457#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005458 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5459 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005460 goto out;
5461
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005462 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005463 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005464 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5465 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005466 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005467 /*
5468 * AARGH! We are left with different
5469 * size max buffer!!!!
5470 * The max buffer is our "snapshot" buffer.
5471 * When a tracer needs a snapshot (one of the
5472 * latency tracers), it swaps the max buffer
5473 * with the saved snapshot. We succeeded in updating
5474 * the size of the main buffer, but failed to
5475 * update the size of the max buffer. But when we tried
5476 * to reset the main buffer to the original size, we
5477 * failed there too. This is very unlikely to
5478 * happen, but if it does, warn and kill all
5479 * tracing.
5480 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005481 WARN_ON(1);
5482 tracing_disabled = 1;
5483 }
5484 return ret;
5485 }
5486
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005487 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005488 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005489 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005490 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005491
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005492 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005493#endif /* CONFIG_TRACER_MAX_TRACE */
5494
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005495 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005496 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005497 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005498 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005499
5500 return ret;
5501}
5502
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005503static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5504 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005505{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005506 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005507
5508 mutex_lock(&trace_types_lock);
5509
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005510 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5511 /* make sure, this cpu is enabled in the mask */
5512 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5513 ret = -EINVAL;
5514 goto out;
5515 }
5516 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005517
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005518 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005519 if (ret < 0)
5520 ret = -ENOMEM;
5521
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005522out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005523 mutex_unlock(&trace_types_lock);
5524
5525 return ret;
5526}
5527
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005528
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005529/**
5530 * tracing_update_buffers - used by tracing facility to expand ring buffers
5531 *
5532 * To save memory when tracing is never used on a system that has it
5533 * configured in, the ring buffers are set to a minimum size. Once a
5534 * user starts to use the tracing facility, they need to grow
5535 * to their default size.
5536 *
5537 * This function is to be called when a tracer is about to be used.
5538 */
5539int tracing_update_buffers(void)
5540{
5541 int ret = 0;
5542
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005543 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005544 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005545 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005546 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005547 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005548
5549 return ret;
5550}
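
/*
 * Usage sketch (editor's addition): expansion is lazy. For example,
 * either of
 *
 *	echo function > current_tracer
 *	echo 1 > events/sched/sched_switch/enable
 *
 * passes through tracing_set_tracer() or tracing_update_buffers() and
 * grows the ring buffer from its boot-time minimum to trace_buf_size
 * on first use.
 */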
5551
Steven Rostedt577b7852009-02-26 23:43:05 -05005552struct trace_option_dentry;
5553
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005554static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005555create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005556
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005557/*
5558 * Used to clear out the tracer before deletion of an instance.
5559 * Must have trace_types_lock held.
5560 */
5561static void tracing_set_nop(struct trace_array *tr)
5562{
5563 if (tr->current_trace == &nop_trace)
5564 return;
5565
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005566 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005567
5568 if (tr->current_trace->reset)
5569 tr->current_trace->reset(tr);
5570
5571 tr->current_trace = &nop_trace;
5572}
5573
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005574static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005575{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005576 /* Only enable if the directory has been created already. */
5577 if (!tr->dir)
5578 return;
5579
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005580 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005581}
5582
5583static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5584{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005585 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005586#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005587 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005588#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005589 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005590
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005591 mutex_lock(&trace_types_lock);
5592
Steven Rostedt73c51622009-03-11 13:42:01 -04005593 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005594 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005595 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005596 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005597 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005598 ret = 0;
5599 }
5600
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005601 for (t = trace_types; t; t = t->next) {
5602 if (strcmp(t->name, buf) == 0)
5603 break;
5604 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005605 if (!t) {
5606 ret = -EINVAL;
5607 goto out;
5608 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005609 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005610 goto out;
5611
Tom Zanussia35873a2019-02-13 17:42:45 -06005612#ifdef CONFIG_TRACER_SNAPSHOT
5613 if (t->use_max_tr) {
5614 arch_spin_lock(&tr->max_lock);
5615 if (tr->cond_snapshot)
5616 ret = -EBUSY;
5617 arch_spin_unlock(&tr->max_lock);
5618 if (ret)
5619 goto out;
5620 }
5621#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005622 /* Some tracers won't work on kernel command line */
5623 if (system_state < SYSTEM_RUNNING && t->noboot) {
5624 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5625 t->name);
5626 goto out;
5627 }
5628
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005629 /* Some tracers are only allowed for the top level buffer */
5630 if (!trace_ok_for_array(t, tr)) {
5631 ret = -EINVAL;
5632 goto out;
5633 }
5634
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005635 /* If trace pipe files are being read, we can't change the tracer */
5636 if (tr->current_trace->ref) {
5637 ret = -EBUSY;
5638 goto out;
5639 }
5640
Steven Rostedt9f029e82008-11-12 15:24:24 -05005641 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005642
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005643 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005644
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005645 if (tr->current_trace->reset)
5646 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005647
Paul E. McKenney74401722018-11-06 18:44:52 -08005648 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005649 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005650
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005651#ifdef CONFIG_TRACER_MAX_TRACE
5652 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005653
5654 if (had_max_tr && !t->use_max_tr) {
5655 /*
5656 * We need to make sure that the update_max_tr sees that
5657 * current_trace changed to nop_trace to keep it from
5658 * swapping the buffers after we resize it.
5659 * The update_max_tr is called with interrupts disabled,
5660 * so a synchronize_rcu() is sufficient.
5661 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005662 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005663 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005664 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005665#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005666
5667#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005668 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005669 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005670 if (ret < 0)
5671 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005672 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005673#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005674
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005675 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005676 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005677 if (ret)
5678 goto out;
5679 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005680
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005681 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005682 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005683 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005684 out:
5685 mutex_unlock(&trace_types_lock);
5686
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005687 return ret;
5688}
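
/*
 * Editor's note: this is the handler behind
 *
 *	echo function > current_tracer
 *
 * An unknown tracer name fails with -EINVAL, and switching tracers
 * while a trace_pipe reader holds a reference fails with -EBUSY, per
 * the checks above.
 */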
5689
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005690static ssize_t
5691tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5692 size_t cnt, loff_t *ppos)
5693{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005694 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005695 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005696 int i;
5697 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005698 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005699
Steven Rostedt60063a62008-10-28 10:44:24 -04005700 ret = cnt;
5701
Li Zefanee6c2c12009-09-18 14:06:47 +08005702 if (cnt > MAX_TRACER_SIZE)
5703 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005704
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005705 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005706 return -EFAULT;
5707
5708 buf[cnt] = 0;
5709
5710 /* strip trailing whitespace. */
5711 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5712 buf[i] = 0;
5713
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005714 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005715 if (err)
5716 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005717
Jiri Olsacf8517c2009-10-23 19:36:16 -04005718 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005719
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005720 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005721}
5722
5723static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005724tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5725 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005726{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005727 char buf[64];
5728 int r;
5729
Steven Rostedtcffae432008-05-12 21:21:00 +02005730 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005731 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005732 if (r > sizeof(buf))
5733 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005734 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005735}
5736
5737static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005738tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5739 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005740{
Hannes Eder5e398412009-02-10 19:44:34 +01005741 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005742 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005743
Peter Huewe22fe9b52011-06-07 21:58:27 +02005744 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5745 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005746 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005747
5748 *ptr = val * 1000;
5749
5750 return cnt;
5751}
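
/*
 * Editor's note: writes are parsed as microseconds and stored as
 * nanoseconds (val * 1000); tracing_nsecs_read() divides back down
 * via nsecs_to_usecs(). So "echo 50 > tracing_thresh" stores 50000
 * and reads back as "50".
 */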
5752
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005753static ssize_t
5754tracing_thresh_read(struct file *filp, char __user *ubuf,
5755 size_t cnt, loff_t *ppos)
5756{
5757 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5758}
5759
5760static ssize_t
5761tracing_thresh_write(struct file *filp, const char __user *ubuf,
5762 size_t cnt, loff_t *ppos)
5763{
5764 struct trace_array *tr = filp->private_data;
5765 int ret;
5766
5767 mutex_lock(&trace_types_lock);
5768 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5769 if (ret < 0)
5770 goto out;
5771
5772 if (tr->current_trace->update_thresh) {
5773 ret = tr->current_trace->update_thresh(tr);
5774 if (ret < 0)
5775 goto out;
5776 }
5777
5778 ret = cnt;
5779out:
5780 mutex_unlock(&trace_types_lock);
5781
5782 return ret;
5783}
5784
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005785#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005786
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005787static ssize_t
5788tracing_max_lat_read(struct file *filp, char __user *ubuf,
5789 size_t cnt, loff_t *ppos)
5790{
5791 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5792}
5793
5794static ssize_t
5795tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5796 size_t cnt, loff_t *ppos)
5797{
5798 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5799}
5800
Chen Gange428abb2015-11-10 05:15:15 +08005801#endif
5802
Steven Rostedtb3806b42008-05-12 21:20:46 +02005803static int tracing_open_pipe(struct inode *inode, struct file *filp)
5804{
Oleg Nesterov15544202013-07-23 17:25:57 +02005805 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005806 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005807 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005808
5809 if (tracing_disabled)
5810 return -ENODEV;
5811
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005812 if (trace_array_get(tr) < 0)
5813 return -ENODEV;
5814
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005815 mutex_lock(&trace_types_lock);
5816
Steven Rostedtb3806b42008-05-12 21:20:46 +02005817 /* create a buffer to store the information to pass to userspace */
5818 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005819 if (!iter) {
5820 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005821 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005822 goto out;
5823 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005824
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005825 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005826 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005827
5828 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5829 ret = -ENOMEM;
5830 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305831 }
5832
Steven Rostedta3097202008-11-07 22:36:02 -05005833 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305834 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005835
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005836 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005837 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5838
David Sharp8be07092012-11-13 12:18:22 -08005839 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005840 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005841 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5842
Oleg Nesterov15544202013-07-23 17:25:57 +02005843 iter->tr = tr;
5844 iter->trace_buffer = &tr->trace_buffer;
5845 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005846 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005847 filp->private_data = iter;
5848
Steven Rostedt107bad82008-05-12 21:21:01 +02005849 if (iter->trace->pipe_open)
5850 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005851
Arnd Bergmannb4447862010-07-07 23:40:11 +02005852 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005853
5854 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005855out:
5856 mutex_unlock(&trace_types_lock);
5857 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005858
5859fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005860 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005861 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005862 mutex_unlock(&trace_types_lock);
5863 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005864}
5865
5866static int tracing_release_pipe(struct inode *inode, struct file *file)
5867{
5868 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005869 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005870
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005871 mutex_lock(&trace_types_lock);
5872
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005873 tr->current_trace->ref--;
5874
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005875 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005876 iter->trace->pipe_close(iter);
5877
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005878 mutex_unlock(&trace_types_lock);
5879
Rusty Russell44623442009-01-01 10:12:23 +10305880 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005881 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005882 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005883
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005884 trace_array_put(tr);
5885
Steven Rostedtb3806b42008-05-12 21:20:46 +02005886 return 0;
5887}
5888
Al Viro9dd95742017-07-03 00:42:43 -04005889static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005890trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005891{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005892 struct trace_array *tr = iter->tr;
5893
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005894 /* Iterators are static, they should be filled or empty */
5895 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08005896 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005897
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005898 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005899 /*
5900 * Always select as readable when in blocking mode
5901 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08005902 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005903 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005904 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005905 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005906}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005907
Al Viro9dd95742017-07-03 00:42:43 -04005908static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005909tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5910{
5911 struct trace_iterator *iter = filp->private_data;
5912
5913 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005914}
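
/*
 * Userspace sketch of waiting on trace_pipe with poll(2); editor's
 * example, not part of this file, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd = { .events = POLLIN };
 *		ssize_t n;
 *
 *		pfd.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *		if (pfd.fd < 0)
 *			return 1;
 *		while (poll(&pfd, 1, -1) > 0) {
 *			n = read(pfd.fd, buf, sizeof(buf));
 *			if (n <= 0)
 *				break;
 *			fwrite(buf, 1, (size_t)n, stdout);
 *		}
 *		close(pfd.fd);
 *		return 0;
 *	}
 */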
5915
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005916/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005917static int tracing_wait_pipe(struct file *filp)
5918{
5919 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005920 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005921
5922 while (trace_empty(iter)) {
5923
5924 if ((filp->f_flags & O_NONBLOCK)) {
5925 return -EAGAIN;
5926 }
5927
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005928 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005929 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005930 * That is, we still block if tracing is disabled but we have never
5931 * read anything. This allows a user to cat this file, and
5932 * then enable tracing. But after we have read something,
5933 * we give an EOF when tracing is again disabled.
5934 *
5935 * iter->pos will be 0 if we haven't read anything.
5936 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07005937 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005938 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005939
5940 mutex_unlock(&iter->mutex);
5941
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05005942 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005943
5944 mutex_lock(&iter->mutex);
5945
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005946 if (ret)
5947 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005948 }
5949
5950 return 1;
5951}
5952
Steven Rostedtb3806b42008-05-12 21:20:46 +02005953/*
5954 * Consumer reader.
5955 */
5956static ssize_t
5957tracing_read_pipe(struct file *filp, char __user *ubuf,
5958 size_t cnt, loff_t *ppos)
5959{
5960 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005961 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005962
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005963 /*
5964 * Avoid more than one consumer on a single file descriptor
5965 * This is just a matter of traces coherency, the ring buffer itself
5966 * is protected.
5967 */
5968 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005969
5970 /* return any leftover data */
5971 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5972 if (sret != -EBUSY)
5973 goto out;
5974
5975 trace_seq_init(&iter->seq);
5976
Steven Rostedt107bad82008-05-12 21:21:01 +02005977 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005978 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5979 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005980 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005981 }
5982
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005983waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005984 sret = tracing_wait_pipe(filp);
5985 if (sret <= 0)
5986 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005987
5988 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005989 if (trace_empty(iter)) {
5990 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005991 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005992 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005993
5994 if (cnt >= PAGE_SIZE)
5995 cnt = PAGE_SIZE - 1;
5996
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005997 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005998 memset(&iter->seq, 0,
5999 sizeof(struct trace_iterator) -
6000 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006001 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006002 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006003
Lai Jiangshan4f535962009-05-18 19:35:34 +08006004 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006005 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006006 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006007 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006008 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006009
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006010 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006011 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006012 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006013 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006014 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006015 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006016 if (ret != TRACE_TYPE_NO_CONSUME)
6017 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006018
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006019 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006020 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006021
6022 /*
6023 * Setting the full flag means we reached the trace_seq buffer
6024 * size and we should leave by partial output condition above.
6025 * One of the trace_seq_* functions is not used properly.
6026 */
6027 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6028 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006029 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006030 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006031 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006032
Steven Rostedtb3806b42008-05-12 21:20:46 +02006033 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006034 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006035 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006036 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006037
6038 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006039 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006040 * entries, go back to wait for more entries.
6041 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006042 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006043 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006044
Steven Rostedt107bad82008-05-12 21:21:01 +02006045out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006046 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006047
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006048 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006049}
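
/*
 * Editor's note: unlike the "trace" file, this read is destructive;
 * each entry is consumed (trace_consume() above) as it is copied out.
 * Readers sharing one file descriptor are serialized by iter->mutex,
 * and separate opens of trace_pipe split the event stream between
 * themselves.
 */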
6050
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006051static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6052 unsigned int idx)
6053{
6054 __free_page(spd->pages[idx]);
6055}
6056
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006057static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006058 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006059 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006060 .steal = generic_pipe_buf_steal,
6061 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006062};
6063
Steven Rostedt34cd4992009-02-09 12:06:29 -05006064static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006065tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006066{
6067 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006068 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006069 int ret;
6070
6071 /* Seq buffer is page-sized, exactly what we need. */
6072 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006073 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006074 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006075
6076 if (trace_seq_has_overflowed(&iter->seq)) {
6077 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006078 break;
6079 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006080
6081 /*
6082 * This should not be hit: TRACE_TYPE_PARTIAL_LINE should only be
6083 * returned if iter->seq overflowed, which the check above already
6084 * caught. But test for it anyway to be safe.
6085 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006086 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006087 iter->seq.seq.len = save_len;
6088 break;
6089 }
6090
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006091 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006092 if (rem < count) {
6093 rem = 0;
6094 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006095 break;
6096 }
6097
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006098 if (ret != TRACE_TYPE_NO_CONSUME)
6099 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006100 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006101 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006102 rem = 0;
6103 iter->ent = NULL;
6104 break;
6105 }
6106 }
6107
6108 return rem;
6109}
6110
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006111static ssize_t tracing_splice_read_pipe(struct file *filp,
6112 loff_t *ppos,
6113 struct pipe_inode_info *pipe,
6114 size_t len,
6115 unsigned int flags)
6116{
Jens Axboe35f3d142010-05-20 10:43:18 +02006117 struct page *pages_def[PIPE_DEF_BUFFERS];
6118 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006119 struct trace_iterator *iter = filp->private_data;
6120 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006121 .pages = pages_def,
6122 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006123 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006124 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006125 .ops = &tracing_pipe_buf_ops,
6126 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006127 };
6128 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006129 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006130 unsigned int i;
6131
Jens Axboe35f3d142010-05-20 10:43:18 +02006132 if (splice_grow_spd(pipe, &spd))
6133 return -ENOMEM;
6134
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006135 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006136
6137 if (iter->trace->splice_read) {
6138 ret = iter->trace->splice_read(iter, filp,
6139 ppos, pipe, len, flags);
6140 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006141 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006142 }
6143
6144 ret = tracing_wait_pipe(filp);
6145 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006146 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006147
Jason Wessel955b61e2010-08-05 09:22:23 -05006148 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006149 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006150 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006151 }
6152
Lai Jiangshan4f535962009-05-18 19:35:34 +08006153 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006154 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006155
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006156 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006157 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006158 spd.pages[i] = alloc_page(GFP_KERNEL);
6159 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006160 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006161
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006162 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006163
6164 /* Copy the data into the page, so we can start over. */
6165 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006166 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006167 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006168 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006169 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006170 break;
6171 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006172 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006173 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006174
Steven Rostedtf9520752009-03-02 14:04:40 -05006175 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006176 }
6177
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006178 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006179 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006180 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006181
6182 spd.nr_pages = i;
6183
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006184 if (i)
6185 ret = splice_to_pipe(pipe, &spd);
6186 else
6187 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006188out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006189 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006190 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006191
Steven Rostedt34cd4992009-02-09 12:06:29 -05006192out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006193 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006194 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006195}
6196
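/*
 * Illustrative userspace sketch (not part of this file): the
 * splice_read path above lets a reader move page-sized chunks of
 * formatted trace text into a pipe without an extra copy. The
 * tracefs path is an assumption.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	int out = open("trace.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	int pfd[2];

	if (fd < 0 || out < 0 || pipe(pfd) < 0)
		return 1;
	for (;;) {
		/* the kernel fills pipe pages via tracing_splice_read_pipe() */
		ssize_t n = splice(fd, NULL, pfd[1], NULL, 65536, 0);

		if (n <= 0)
			break;
		splice(pfd[0], NULL, out, NULL, n, 0);
	}
	return 0;
}
#endif
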
Steven Rostedta98a3c32008-05-12 21:20:59 +02006197static ssize_t
6198tracing_entries_read(struct file *filp, char __user *ubuf,
6199 size_t cnt, loff_t *ppos)
6200{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006201 struct inode *inode = file_inode(filp);
6202 struct trace_array *tr = inode->i_private;
6203 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006204 char buf[64];
6205 int r = 0;
6206 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006207
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006208 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006209
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006210 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006211 int cpu, buf_size_same;
6212 unsigned long size;
6213
6214 size = 0;
6215 buf_size_same = 1;
6216 /* check if all cpu buffer sizes are the same */
6217 for_each_tracing_cpu(cpu) {
6218 /* fill in the size from the first enabled cpu */
6219 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006220 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6221 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006222 buf_size_same = 0;
6223 break;
6224 }
6225 }
6226
6227 if (buf_size_same) {
6228 if (!ring_buffer_expanded)
6229 r = sprintf(buf, "%lu (expanded: %lu)\n",
6230 size >> 10,
6231 trace_buf_size >> 10);
6232 else
6233 r = sprintf(buf, "%lu\n", size >> 10);
6234 } else
6235 r = sprintf(buf, "X\n");
6236 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006237 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006238
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006239 mutex_unlock(&trace_types_lock);
6240
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006241 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6242 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006243}
6244
6245static ssize_t
6246tracing_entries_write(struct file *filp, const char __user *ubuf,
6247 size_t cnt, loff_t *ppos)
6248{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006249 struct inode *inode = file_inode(filp);
6250 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006251 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006252 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006253
Peter Huewe22fe9b52011-06-07 21:58:27 +02006254 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6255 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006256 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006257
6258 /* must have at least 1 entry */
6259 if (!val)
6260 return -EINVAL;
6261
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006262 /* value is in KB */
6263 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006264 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006265 if (ret < 0)
6266 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006267
Jiri Olsacf8517c2009-10-23 19:36:16 -04006268 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006269
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006270 return cnt;
6271}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006272
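/*
 * Illustrative userspace sketch (not part of this file): resizing
 * the ring buffer through the buffer_size_kb file handled above.
 * The value is in KB, matching the "val <<= 10" in
 * tracing_entries_write(). The tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0)
		return 1;
	/* request 4 MB per CPU; ends up in tracing_resize_ring_buffer() */
	if (write(fd, "4096", 4) != 4)
		return 1;
	close(fd);
	return 0;
}
#endif
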
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006273static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006274tracing_total_entries_read(struct file *filp, char __user *ubuf,
6275 size_t cnt, loff_t *ppos)
6276{
6277 struct trace_array *tr = filp->private_data;
6278 char buf[64];
6279 int r, cpu;
6280 unsigned long size = 0, expanded_size = 0;
6281
6282 mutex_lock(&trace_types_lock);
6283 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006284 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006285 if (!ring_buffer_expanded)
6286 expanded_size += trace_buf_size >> 10;
6287 }
6288 if (ring_buffer_expanded)
6289 r = sprintf(buf, "%lu\n", size);
6290 else
6291 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6292 mutex_unlock(&trace_types_lock);
6293
6294 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6295}
6296
6297static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006298tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6299 size_t cnt, loff_t *ppos)
6300{
6301 /*
6302 * There is no need to read what the user has written; this function
6303 * exists only so that using "echo" on the file does not return an error
6304 */
6305
6306 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006307
6308 return cnt;
6309}
6310
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006311static int
6312tracing_free_buffer_release(struct inode *inode, struct file *filp)
6313{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006314 struct trace_array *tr = inode->i_private;
6315
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006316 /* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006317 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006318 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006319 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006320 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006321
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006322 trace_array_put(tr);
6323
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006324 return 0;
6325}
6326
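/*
 * Illustrative userspace sketch (not part of this file): writes to
 * the free_buffer file are accepted but ignored; the buffer is
 * actually freed on close(), via tracing_free_buffer_release()
 * above, which may also stop tracing first if the stop-on-free
 * option is set. The tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* content is ignored */
	close(fd);		/* ring buffer is resized to 0 here */
	return 0;
}
#endif
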
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006327static ssize_t
6328tracing_mark_write(struct file *filp, const char __user *ubuf,
6329 size_t cnt, loff_t *fpos)
6330{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006331 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006332 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006333 enum event_trigger_type tt = ETT_NONE;
Steven Rostedtd696b582011-09-22 11:50:27 -04006334 struct ring_buffer *buffer;
6335 struct print_entry *entry;
6336 unsigned long irq_flags;
Steven Rostedtd696b582011-09-22 11:50:27 -04006337 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006338 int size;
6339 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006340
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006341/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006342#define FAULTED_STR "<faulted>"
6343#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006344
Steven Rostedtc76f0692008-11-07 22:36:02 -05006345 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006346 return -EINVAL;
6347
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006348 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006349 return -EINVAL;
6350
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006351 if (cnt > TRACE_BUF_SIZE)
6352 cnt = TRACE_BUF_SIZE;
6353
Steven Rostedtd696b582011-09-22 11:50:27 -04006354 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006355
Steven Rostedtd696b582011-09-22 11:50:27 -04006356 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006357 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6358
6359 /* If less than "<faulted>", then make sure we can still add that */
6360 if (cnt < FAULTED_SIZE)
6361 size += FAULTED_SIZE - cnt;
6362
Alexander Z Lam2d716192013-07-01 15:31:24 -07006363 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006364 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6365 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006366 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006367 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006368 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006369
6370 entry = ring_buffer_event_data(event);
6371 entry->ip = _THIS_IP_;
6372
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006373 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6374 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006375 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006376 cnt = FAULTED_SIZE;
6377 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006378 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006379 written = cnt;
6380 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006381
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006382 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6383 /* do not add \n before testing triggers, but add \0 */
6384 entry->buf[cnt] = '\0';
6385 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6386 }
6387
Steven Rostedtd696b582011-09-22 11:50:27 -04006388 if (entry->buf[cnt - 1] != '\n') {
6389 entry->buf[cnt] = '\n';
6390 entry->buf[cnt + 1] = '\0';
6391 } else
6392 entry->buf[cnt] = '\0';
6393
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006394 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006395
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006396 if (tt)
6397 event_triggers_post_call(tr->trace_marker_file, tt);
6398
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006399 if (written > 0)
6400 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006401
Steven Rostedtfa32e852016-07-06 15:25:08 -04006402 return written;
6403}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006404
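/*
 * Illustrative userspace sketch (not part of this file): emitting a
 * text marker through the trace_marker write path above. A trailing
 * newline is optional; tracing_mark_write() appends one if missing.
 * The tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace\n";
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	/* one write becomes one TRACE_PRINT entry in the ring buffer */
	write(fd, msg, strlen(msg));
	close(fd);
	return 0;
}
#endif
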
Steven Rostedtfa32e852016-07-06 15:25:08 -04006405/* Limit it for now to 3K (including tag) */
6406#define RAW_DATA_MAX_SIZE (1024*3)
6407
6408static ssize_t
6409tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6410 size_t cnt, loff_t *fpos)
6411{
6412 struct trace_array *tr = filp->private_data;
6413 struct ring_buffer_event *event;
6414 struct ring_buffer *buffer;
6415 struct raw_data_entry *entry;
6416 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006417 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006418 int size;
6419 int len;
6420
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006421#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6422
Steven Rostedtfa32e852016-07-06 15:25:08 -04006423 if (tracing_disabled)
6424 return -EINVAL;
6425
6426 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6427 return -EINVAL;
6428
6429 /* The marker must at least have a tag id */
6430 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6431 return -EINVAL;
6432
6433 if (cnt > TRACE_BUF_SIZE)
6434 cnt = TRACE_BUF_SIZE;
6435
6436 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6437
Steven Rostedtfa32e852016-07-06 15:25:08 -04006438 local_save_flags(irq_flags);
6439 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006440 if (cnt < FAULT_SIZE_ID)
6441 size += FAULT_SIZE_ID - cnt;
6442
Steven Rostedtfa32e852016-07-06 15:25:08 -04006443 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006444 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6445 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006446 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006447 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006448 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006449
6450 entry = ring_buffer_event_data(event);
6451
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006452 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6453 if (len) {
6454 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006455 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006456 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006457 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006458 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006459
6460 __buffer_unlock_commit(buffer, event);
6461
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006462 if (written > 0)
6463 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006464
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006465 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006466}
6467
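/*
 * Illustrative userspace sketch (not part of this file): the raw
 * marker path above requires the payload to begin with an int tag
 * id, followed by arbitrary binary data. The tag value used here is
 * made up; the tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct {
		unsigned int id;	/* consumed as entry->id above */
		char data[8];
	} payload = { .id = 42 };
	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);

	if (fd < 0)
		return 1;
	memcpy(payload.data, "rawdata", 8);
	write(fd, &payload, sizeof(payload));
	close(fd);
	return 0;
}
#endif
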
Li Zefan13f16d22009-12-08 11:16:11 +08006468static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006469{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006470 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006471 int i;
6472
6473 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006474 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006475 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006476 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6477 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006478 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006479
Li Zefan13f16d22009-12-08 11:16:11 +08006480 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006481}
6482
Tom Zanussid71bd342018-01-15 20:52:07 -06006483int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006484{
Zhaolei5079f322009-08-25 16:12:56 +08006485 int i;
6486
Zhaolei5079f322009-08-25 16:12:56 +08006487 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6488 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6489 break;
6490 }
6491 if (i == ARRAY_SIZE(trace_clocks))
6492 return -EINVAL;
6493
Zhaolei5079f322009-08-25 16:12:56 +08006494 mutex_lock(&trace_types_lock);
6495
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006496 tr->clock_id = i;
6497
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006498 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006499
David Sharp60303ed2012-10-11 16:27:52 -07006500 /*
6501 * New clock may not be consistent with the previous clock.
6502 * Reset the buffer so that it doesn't have incomparable timestamps.
6503 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006504 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006505
6506#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006507 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006508 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006509 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006510#endif
David Sharp60303ed2012-10-11 16:27:52 -07006511
Zhaolei5079f322009-08-25 16:12:56 +08006512 mutex_unlock(&trace_types_lock);
6513
Steven Rostedte1e232c2014-02-10 23:38:46 -05006514 return 0;
6515}
6516
6517static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6518 size_t cnt, loff_t *fpos)
6519{
6520 struct seq_file *m = filp->private_data;
6521 struct trace_array *tr = m->private;
6522 char buf[64];
6523 const char *clockstr;
6524 int ret;
6525
6526 if (cnt >= sizeof(buf))
6527 return -EINVAL;
6528
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006529 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006530 return -EFAULT;
6531
6532 buf[cnt] = 0;
6533
6534 clockstr = strstrip(buf);
6535
6536 ret = tracing_set_clock(tr, clockstr);
6537 if (ret)
6538 return ret;
6539
Zhaolei5079f322009-08-25 16:12:56 +08006540 *fpos += cnt;
6541
6542 return cnt;
6543}
6544
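/*
 * Illustrative userspace sketch (not part of this file): selecting a
 * trace clock by writing a name from trace_clocks[] (e.g. "mono") to
 * the trace_clock file; tracing_set_clock() above then resets the
 * buffers to avoid mixing incomparable timestamps. The tracefs path
 * is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "mono", 4);
	close(fd);
	return 0;
}
#endif
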
Li Zefan13f16d22009-12-08 11:16:11 +08006545static int tracing_clock_open(struct inode *inode, struct file *file)
6546{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006547 struct trace_array *tr = inode->i_private;
6548 int ret;
6549
Li Zefan13f16d22009-12-08 11:16:11 +08006550 if (tracing_disabled)
6551 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006552
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006553 if (trace_array_get(tr))
6554 return -ENODEV;
6555
6556 ret = single_open(file, tracing_clock_show, inode->i_private);
6557 if (ret < 0)
6558 trace_array_put(tr);
6559
6560 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006561}
6562
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006563static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6564{
6565 struct trace_array *tr = m->private;
6566
6567 mutex_lock(&trace_types_lock);
6568
6569 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6570 seq_puts(m, "delta [absolute]\n");
6571 else
6572 seq_puts(m, "[delta] absolute\n");
6573
6574 mutex_unlock(&trace_types_lock);
6575
6576 return 0;
6577}
6578
6579static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6580{
6581 struct trace_array *tr = inode->i_private;
6582 int ret;
6583
6584 if (tracing_disabled)
6585 return -ENODEV;
6586
6587 if (trace_array_get(tr))
6588 return -ENODEV;
6589
6590 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6591 if (ret < 0)
6592 trace_array_put(tr);
6593
6594 return ret;
6595}
6596
Tom Zanussi00b41452018-01-15 20:51:39 -06006597int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6598{
6599 int ret = 0;
6600
6601 mutex_lock(&trace_types_lock);
6602
6603 if (abs && tr->time_stamp_abs_ref++)
6604 goto out;
6605
6606 if (!abs) {
6607 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6608 ret = -EINVAL;
6609 goto out;
6610 }
6611
6612 if (--tr->time_stamp_abs_ref)
6613 goto out;
6614 }
6615
6616 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6617
6618#ifdef CONFIG_TRACER_MAX_TRACE
6619 if (tr->max_buffer.buffer)
6620 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6621#endif
6622 out:
6623 mutex_unlock(&trace_types_lock);
6624
6625 return ret;
6626}
6627
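/*
 * Illustrative userspace sketch (not part of this file): the
 * timestamp_mode file above is read-only; the mode itself is
 * switched by kernel users of tracing_set_time_stamp_abs(). The
 * tracefs path is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/tracing/timestamp_mode", "r");

	if (!f)
		return 1;
	/* prints "[delta] absolute" or "delta [absolute]" */
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif
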
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006628struct ftrace_buffer_info {
6629 struct trace_iterator iter;
6630 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006631 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006632 unsigned int read;
6633};
6634
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006635#ifdef CONFIG_TRACER_SNAPSHOT
6636static int tracing_snapshot_open(struct inode *inode, struct file *file)
6637{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006638 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006639 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006640 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006641 int ret = 0;
6642
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006643 if (trace_array_get(tr) < 0)
6644 return -ENODEV;
6645
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006646 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006647 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006648 if (IS_ERR(iter))
6649 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006650 } else {
6651 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006652 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006653 m = kzalloc(sizeof(*m), GFP_KERNEL);
6654 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006655 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006656 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6657 if (!iter) {
6658 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006659 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006660 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006661 ret = 0;
6662
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006663 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006664 iter->trace_buffer = &tr->max_buffer;
6665 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006666 m->private = iter;
6667 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006668 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006669out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006670 if (ret < 0)
6671 trace_array_put(tr);
6672
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006673 return ret;
6674}
6675
6676static ssize_t
6677tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6678 loff_t *ppos)
6679{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006680 struct seq_file *m = filp->private_data;
6681 struct trace_iterator *iter = m->private;
6682 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006683 unsigned long val;
6684 int ret;
6685
6686 ret = tracing_update_buffers();
6687 if (ret < 0)
6688 return ret;
6689
6690 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6691 if (ret)
6692 return ret;
6693
6694 mutex_lock(&trace_types_lock);
6695
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006696 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006697 ret = -EBUSY;
6698 goto out;
6699 }
6700
Tom Zanussia35873a2019-02-13 17:42:45 -06006701 arch_spin_lock(&tr->max_lock);
6702 if (tr->cond_snapshot)
6703 ret = -EBUSY;
6704 arch_spin_unlock(&tr->max_lock);
6705 if (ret)
6706 goto out;
6707
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006708 switch (val) {
6709 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006710 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6711 ret = -EINVAL;
6712 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006713 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006714 if (tr->allocated_snapshot)
6715 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006716 break;
6717 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006718/* Only allow per-cpu swap if the ring buffer supports it */
6719#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6720 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6721 ret = -EINVAL;
6722 break;
6723 }
6724#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006725 if (tr->allocated_snapshot)
6726 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6727 &tr->trace_buffer, iter->cpu_file);
6728 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006729 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09006730 if (ret < 0)
6731 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006732 local_irq_disable();
6733 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006734 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06006735 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006736 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006737 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006738 local_irq_enable();
6739 break;
6740 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006741 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006742 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6743 tracing_reset_online_cpus(&tr->max_buffer);
6744 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04006745 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006746 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006747 break;
6748 }
6749
6750 if (ret >= 0) {
6751 *ppos += cnt;
6752 ret = cnt;
6753 }
6754out:
6755 mutex_unlock(&trace_types_lock);
6756 return ret;
6757}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006758
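/*
 * Illustrative userspace sketch (not part of this file): the values
 * accepted by the snapshot file match the switch in
 * tracing_snapshot_write() above: 0 frees the snapshot buffer, 1
 * allocates it if needed and swaps, anything else clears it. The
 * tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0)
		return 1;
	/* swap the live buffer into the snapshot (max) buffer */
	write(fd, "1", 1);
	close(fd);
	return 0;
}
#endif
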
6759static int tracing_snapshot_release(struct inode *inode, struct file *file)
6760{
6761 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006762 int ret;
6763
6764 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006765
6766 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006767 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006768
6769 /* If write only, the seq_file is just a stub */
6770 if (m)
6771 kfree(m->private);
6772 kfree(m);
6773
6774 return 0;
6775}
6776
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006777static int tracing_buffers_open(struct inode *inode, struct file *filp);
6778static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6779 size_t count, loff_t *ppos);
6780static int tracing_buffers_release(struct inode *inode, struct file *file);
6781static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6782 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6783
6784static int snapshot_raw_open(struct inode *inode, struct file *filp)
6785{
6786 struct ftrace_buffer_info *info;
6787 int ret;
6788
6789 ret = tracing_buffers_open(inode, filp);
6790 if (ret < 0)
6791 return ret;
6792
6793 info = filp->private_data;
6794
6795 if (info->iter.trace->use_max_tr) {
6796 tracing_buffers_release(inode, filp);
6797 return -EBUSY;
6798 }
6799
6800 info->iter.snapshot = true;
6801 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6802
6803 return ret;
6804}
6805
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006806#endif /* CONFIG_TRACER_SNAPSHOT */
6807
6808
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006809static const struct file_operations tracing_thresh_fops = {
6810 .open = tracing_open_generic,
6811 .read = tracing_thresh_read,
6812 .write = tracing_thresh_write,
6813 .llseek = generic_file_llseek,
6814};
6815
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006816#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006817static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006818 .open = tracing_open_generic,
6819 .read = tracing_max_lat_read,
6820 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006821 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006822};
Chen Gange428abb2015-11-10 05:15:15 +08006823#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006824
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006825static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006826 .open = tracing_open_generic,
6827 .read = tracing_set_trace_read,
6828 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006829 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006830};
6831
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006832static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006833 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006834 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006835 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006836 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006837 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006838 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006839};
6840
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006841static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006842 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006843 .read = tracing_entries_read,
6844 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006845 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006846 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006847};
6848
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006849static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006850 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006851 .read = tracing_total_entries_read,
6852 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006853 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006854};
6855
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006856static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006857 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006858 .write = tracing_free_buffer_write,
6859 .release = tracing_free_buffer_release,
6860};
6861
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006862static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006863 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006864 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006865 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006866 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006867};
6868
Steven Rostedtfa32e852016-07-06 15:25:08 -04006869static const struct file_operations tracing_mark_raw_fops = {
6870 .open = tracing_open_generic_tr,
6871 .write = tracing_mark_raw_write,
6872 .llseek = generic_file_llseek,
6873 .release = tracing_release_generic_tr,
6874};
6875
Zhaolei5079f322009-08-25 16:12:56 +08006876static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006877 .open = tracing_clock_open,
6878 .read = seq_read,
6879 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006880 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006881 .write = tracing_clock_write,
6882};
6883
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006884static const struct file_operations trace_time_stamp_mode_fops = {
6885 .open = tracing_time_stamp_mode_open,
6886 .read = seq_read,
6887 .llseek = seq_lseek,
6888 .release = tracing_single_release_tr,
6889};
6890
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006891#ifdef CONFIG_TRACER_SNAPSHOT
6892static const struct file_operations snapshot_fops = {
6893 .open = tracing_snapshot_open,
6894 .read = seq_read,
6895 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006896 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006897 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006898};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006899
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006900static const struct file_operations snapshot_raw_fops = {
6901 .open = snapshot_raw_open,
6902 .read = tracing_buffers_read,
6903 .release = tracing_buffers_release,
6904 .splice_read = tracing_buffers_splice_read,
6905 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006906};
6907
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006908#endif /* CONFIG_TRACER_SNAPSHOT */
6909
Tom Zanussi8a062902019-03-31 18:48:15 -05006910#define TRACING_LOG_ERRS_MAX 8
6911#define TRACING_LOG_LOC_MAX 128
6912
6913#define CMD_PREFIX " Command: "
6914
6915struct err_info {
6916 const char **errs; /* ptr to loc-specific array of err strings */
6917 u8 type; /* index into errs -> specific err string */
6918 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6919 u64 ts;
6920};
6921
6922struct tracing_log_err {
6923 struct list_head list;
6924 struct err_info info;
6925 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6926 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6927};
6928
Tom Zanussi8a062902019-03-31 18:48:15 -05006929static DEFINE_MUTEX(tracing_err_log_lock);
6930
YueHaibingff585c52019-06-14 23:32:10 +08006931static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05006932{
6933 struct tracing_log_err *err;
6934
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006935 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05006936 err = kzalloc(sizeof(*err), GFP_KERNEL);
6937 if (!err)
6938 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006939 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05006940
6941 return err;
6942 }
6943
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006944 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05006945 list_del(&err->list);
6946
6947 return err;
6948}
6949
6950/**
6951 * err_pos - find the position of a string within a command for error careting
6952 * @cmd: The tracing command that caused the error
6953 * @str: The string to position the caret at within @cmd
6954 *
6955 * Finds the position of the first occurrence of @str within @cmd. The
6956 * return value can be passed to tracing_log_err() for caret placement
6957 * within @cmd.
6958 *
6959 * Returns the index within @cmd of the first occurrence of @str or 0
6960 * if @str was not found.
6961 */
6962unsigned int err_pos(char *cmd, const char *str)
6963{
6964 char *found;
6965
6966 if (WARN_ON(!strlen(cmd)))
6967 return 0;
6968
6969 found = strstr(cmd, str);
6970 if (found)
6971 return found - cmd;
6972
6973 return 0;
6974}
6975
6976/**
6977 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04006978 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05006979 * @loc: A string describing where the error occurred
6980 * @cmd: The tracing command that caused the error
6981 * @errs: The array of loc-specific static error strings
6982 * @type: The index into errs[], which produces the specific static err string
6983 * @pos: The position the caret should be placed in the cmd
6984 *
6985 * Writes an error into tracing/error_log of the form:
6986 *
6987 * <loc>: error: <text>
6988 * Command: <cmd>
6989 * ^
6990 *
6991 * tracing/error_log is a small log file containing the last
6992 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
6993 * unless there has been a tracing error, and the error log can be
6994 * cleared and have its memory freed by writing the empty string in
6995 * truncation mode to it, i.e. "echo > tracing/error_log".
6996 *
6997 * NOTE: the @errs array along with the @type param are used to
6998 * produce a static error string - this string is not copied and saved
6999 * when the error is logged - only a pointer to it is saved. See
7000 * existing callers for examples of how static strings are typically
7001 * defined for use with tracing_log_err().
7002 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007003void tracing_log_err(struct trace_array *tr,
7004 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007005 const char **errs, u8 type, u8 pos)
7006{
7007 struct tracing_log_err *err;
7008
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007009 if (!tr)
7010 tr = &global_trace;
7011
Tom Zanussi8a062902019-03-31 18:48:15 -05007012 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007013 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007014 if (PTR_ERR(err) == -ENOMEM) {
7015 mutex_unlock(&tracing_err_log_lock);
7016 return;
7017 }
7018
7019 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7020 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7021
7022 err->info.errs = errs;
7023 err->info.type = type;
7024 err->info.pos = pos;
7025 err->info.ts = local_clock();
7026
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007027 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007028 mutex_unlock(&tracing_err_log_lock);
7029}
7030
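/*
 * Illustrative sketch (not part of the build): the typical caller
 * pattern for tracing_log_err() above. The error-string array, enum,
 * and parser function here are hypothetical; see the real callers
 * (e.g. the hist trigger code) for concrete examples.
 */
#if 0
static const char *my_errs[] = { "Invalid argument", "Out of range" };
enum { MY_ERR_INVAL, MY_ERR_RANGE };

static void my_parse(struct trace_array *tr, char *cmd)
{
	/* point the caret at the offending token, if present */
	if (strstr(cmd, "bogus"))
		tracing_log_err(tr, "my_parser", cmd, my_errs,
				MY_ERR_INVAL, err_pos(cmd, "bogus"));
}
#endif
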
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007031static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007032{
7033 struct tracing_log_err *err, *next;
7034
7035 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007036 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007037 list_del(&err->list);
7038 kfree(err);
7039 }
7040
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007041 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007042 mutex_unlock(&tracing_err_log_lock);
7043}
7044
7045static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7046{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007047 struct trace_array *tr = m->private;
7048
Tom Zanussi8a062902019-03-31 18:48:15 -05007049 mutex_lock(&tracing_err_log_lock);
7050
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007051 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007052}
7053
7054static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7055{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007056 struct trace_array *tr = m->private;
7057
7058 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007059}
7060
7061static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7062{
7063 mutex_unlock(&tracing_err_log_lock);
7064}
7065
7066static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7067{
7068 u8 i;
7069
7070 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7071 seq_putc(m, ' ');
7072 for (i = 0; i < pos; i++)
7073 seq_putc(m, ' ');
7074 seq_puts(m, "^\n");
7075}
7076
7077static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7078{
7079 struct tracing_log_err *err = v;
7080
7081 if (err) {
7082 const char *err_text = err->info.errs[err->info.type];
7083 u64 sec = err->info.ts;
7084 u32 nsec;
7085
7086 nsec = do_div(sec, NSEC_PER_SEC);
7087 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7088 err->loc, err_text);
7089 seq_printf(m, "%s", err->cmd);
7090 tracing_err_log_show_pos(m, err->info.pos);
7091 }
7092
7093 return 0;
7094}
7095
7096static const struct seq_operations tracing_err_log_seq_ops = {
7097 .start = tracing_err_log_seq_start,
7098 .next = tracing_err_log_seq_next,
7099 .stop = tracing_err_log_seq_stop,
7100 .show = tracing_err_log_seq_show
7101};
7102
7103static int tracing_err_log_open(struct inode *inode, struct file *file)
7104{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007105 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007106 int ret = 0;
7107
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007108 if (trace_array_get(tr) < 0)
7109 return -ENODEV;
7110
Tom Zanussi8a062902019-03-31 18:48:15 -05007111 /* If this file was opened for write, then erase contents */
7112 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007113 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007114
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007115 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007116 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007117 if (!ret) {
7118 struct seq_file *m = file->private_data;
7119 m->private = tr;
7120 } else {
7121 trace_array_put(tr);
7122 }
7123 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007124 return ret;
7125}
7126
7127static ssize_t tracing_err_log_write(struct file *file,
7128 const char __user *buffer,
7129 size_t count, loff_t *ppos)
7130{
7131 return count;
7132}
7133
Takeshi Misawad122ed62019-06-28 19:56:40 +09007134static int tracing_err_log_release(struct inode *inode, struct file *file)
7135{
7136 struct trace_array *tr = inode->i_private;
7137
7138 trace_array_put(tr);
7139
7140 if (file->f_mode & FMODE_READ)
7141 seq_release(inode, file);
7142
7143 return 0;
7144}
7145
Tom Zanussi8a062902019-03-31 18:48:15 -05007146static const struct file_operations tracing_err_log_fops = {
7147 .open = tracing_err_log_open,
7148 .write = tracing_err_log_write,
7149 .read = seq_read,
7150 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007151 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007152};
7153
Steven Rostedt2cadf912008-12-01 22:20:19 -05007154static int tracing_buffers_open(struct inode *inode, struct file *filp)
7155{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007156 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007157 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007158 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007159
7160 if (tracing_disabled)
7161 return -ENODEV;
7162
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007163 if (trace_array_get(tr) < 0)
7164 return -ENODEV;
7165
Steven Rostedt2cadf912008-12-01 22:20:19 -05007166 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007167 if (!info) {
7168 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007169 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007170 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007171
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007172 mutex_lock(&trace_types_lock);
7173
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007174 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007175 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007176 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007177 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007178 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007179 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007180 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007181
7182 filp->private_data = info;
7183
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007184 tr->current_trace->ref++;
7185
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007186 mutex_unlock(&trace_types_lock);
7187
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007188 ret = nonseekable_open(inode, filp);
7189 if (ret < 0)
7190 trace_array_put(tr);
7191
7192 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007193}
7194
Al Viro9dd95742017-07-03 00:42:43 -04007195static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007196tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7197{
7198 struct ftrace_buffer_info *info = filp->private_data;
7199 struct trace_iterator *iter = &info->iter;
7200
7201 return trace_poll(iter, filp, poll_table);
7202}
7203
Steven Rostedt2cadf912008-12-01 22:20:19 -05007204static ssize_t
7205tracing_buffers_read(struct file *filp, char __user *ubuf,
7206 size_t count, loff_t *ppos)
7207{
7208 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007209 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007210 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007211 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007212
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007213 if (!count)
7214 return 0;
7215
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007216#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007217 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7218 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007219#endif
7220
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007221 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007222 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7223 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007224 if (IS_ERR(info->spare)) {
7225 ret = PTR_ERR(info->spare);
7226 info->spare = NULL;
7227 } else {
7228 info->spare_cpu = iter->cpu_file;
7229 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007230 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007231 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007232 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007233
Steven Rostedt2cadf912008-12-01 22:20:19 -05007234 /* Do we have leftover data from a previous read? */
7235 if (info->read < PAGE_SIZE)
7236 goto read;
7237
Steven Rostedtb6273442013-02-28 13:44:11 -05007238 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007239 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007240 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007241 &info->spare,
7242 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007243 iter->cpu_file, 0);
7244 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007245
7246 if (ret < 0) {
7247 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007248 if ((filp->f_flags & O_NONBLOCK))
7249 return -EAGAIN;
7250
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007251 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007252 if (ret)
7253 return ret;
7254
Steven Rostedtb6273442013-02-28 13:44:11 -05007255 goto again;
7256 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007257 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007258 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007259
Steven Rostedt436fc282011-10-14 10:44:25 -04007260 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007261 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007262 size = PAGE_SIZE - info->read;
7263 if (size > count)
7264 size = count;
7265
7266 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007267 if (ret == size)
7268 return -EFAULT;
7269
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007270 size -= ret;
7271
Steven Rostedt2cadf912008-12-01 22:20:19 -05007272 *ppos += size;
7273 info->read += size;
7274
7275 return size;
7276}
7277
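/*
 * Illustrative userspace sketch (not part of this file): reading
 * raw, page-sized ring-buffer data from a per-CPU trace_pipe_raw
 * file, which is backed by tracing_buffers_read() above. The data
 * is binary sub-buffer pages, not formatted text. The tracefs path
 * is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* each successful read returns at most one buffer page */
	while (read(fd, page, sizeof(page)) > 0)
		;	/* parse the binary ring-buffer page here */
	close(fd);
	return 0;
}
#endif
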
7278static int tracing_buffers_release(struct inode *inode, struct file *file)
7279{
7280 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007281 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007282
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007283 mutex_lock(&trace_types_lock);
7284
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007285 iter->tr->current_trace->ref--;
7286
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007287 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007288
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007289 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007290 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7291 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007292 kfree(info);
7293
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007294 mutex_unlock(&trace_types_lock);
7295
Steven Rostedt2cadf912008-12-01 22:20:19 -05007296 return 0;
7297}
7298
7299struct buffer_ref {
7300 struct ring_buffer *buffer;
7301 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007302 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007303 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007304};
7305
Jann Hornb9872222019-04-04 23:59:25 +02007306static void buffer_ref_release(struct buffer_ref *ref)
7307{
7308 if (!refcount_dec_and_test(&ref->refcount))
7309 return;
7310 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7311 kfree(ref);
7312}
7313
Steven Rostedt2cadf912008-12-01 22:20:19 -05007314static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7315 struct pipe_buffer *buf)
7316{
7317 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7318
Jann Hornb9872222019-04-04 23:59:25 +02007319 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007320 buf->private = 0;
7321}
7322
Matthew Wilcox15fab632019-04-05 14:02:10 -07007323static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007324 struct pipe_buffer *buf)
7325{
7326 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7327
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007328 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007329 return false;
7330
Jann Hornb9872222019-04-04 23:59:25 +02007331 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007332 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007333}
7334
7335/* Pipe buffer operations for a ring-buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007336static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007337 .confirm = generic_pipe_buf_confirm,
7338 .release = buffer_pipe_buf_release,
Jann Hornb9872222019-04-04 23:59:25 +02007339 .steal = generic_pipe_buf_nosteal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007340 .get = buffer_pipe_buf_get,
7341};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
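
/*
 * Illustrative userspace consumer for the splice path above (a sketch,
 * not part of the kernel build; the tracefs mount point and output file
 * name are assumptions). Reads must be page sized and page aligned,
 * matching the checks in tracing_buffers_splice_read():
 *
 *	int tfd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int ofd = open("trace.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int fds[2];
 *	long psz = sysconf(_SC_PAGESIZE);
 *
 *	pipe(fds);
 *	for (;;) {
 *		ssize_t n = splice(tfd, NULL, fds[1], NULL, psz, 0);
 *		if (n <= 0)
 *			break;
 *		splice(fds[0], NULL, ofd, NULL, n, 0);
 *	}
 *
 * Each successful splice() moves whole ring-buffer pages into the pipe
 * without copying the trace data through userspace.
 */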

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
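
/*
 * Example of what tracing_stats_read() produces for per_cpu/cpuN/stats
 * (field names come from the trace_seq_printf() calls above; the values
 * shown are made up):
 *
 *	entries: 1045
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 49712
 *	oldest event ts: 21694.668971
 *	now ts: 21704.113184
 *	dropped events: 0
 *	read events: 213
 */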

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
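
/*
 * Usage sketch for the "snapshot" command registered above, written to
 * set_ftrace_filter (the function name is just an example):
 *
 *	# echo 'native_flush_tlb:snapshot' > set_ftrace_filter
 *	# echo 'native_flush_tlb:snapshot:3' > set_ftrace_filter
 *	# echo '!native_flush_tlb:snapshot' > set_ftrace_filter
 *
 * The first form snapshots on every hit of the function, the second
 * only the first three times (the count is parsed from the text after
 * the ':' in ftrace_trace_snapshot_callback()), and the '!' form
 * unregisters the probe again.
 */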

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
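
/*
 * The resulting layout under per_cpu/ for each tracing CPU, as created
 * above (the snapshot files only exist with CONFIG_TRACER_SNAPSHOT):
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot
 *	per_cpu/cpu0/snapshot_raw
 */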

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
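
/*
 * Worked example for get_tr_index(): if data points at
 * tr->trace_flags_index[3], then *data == 3 (init_trace_flags_index()
 * below fills the array with its own indexes), so data - 3 is
 * &tr->trace_flags_index[0] and container_of() recovers tr. The flag
 * itself is then tested as tr->trace_flags & (1 << 3).
 */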

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
7958
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007959struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007960 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007961 struct dentry *parent,
7962 void *data,
7963 const struct file_operations *fops)
7964{
7965 struct dentry *ret;
7966
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007967 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007968 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007969 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007970
7971 return ret;
7972}
7973
7974
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007975static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007976{
7977 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007978
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007979 if (tr->options)
7980 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007981
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007982 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007983 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007984 return NULL;
7985
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007986 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007987 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007988 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007989 return NULL;
7990 }
7991
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007992 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007993}
7994
Steven Rostedt577b7852009-02-26 23:43:05 -05007995static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007996create_trace_option_file(struct trace_array *tr,
7997 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007998 struct tracer_flags *flags,
7999 struct tracer_opt *opt)
8000{
8001 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008002
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008003 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008004 if (!t_options)
8005 return;
8006
8007 topt->flags = flags;
8008 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008009 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008010
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008011 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008012 &trace_options_fops);
8013
Steven Rostedt577b7852009-02-26 23:43:05 -05008014}
8015
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008016static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008017create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008018{
8019 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008020 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008021 struct tracer_flags *flags;
8022 struct tracer_opt *opts;
8023 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008024 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008025
8026 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008027 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008028
8029 flags = tracer->flags;
8030
8031 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008032 return;
8033
8034 /*
8035 * If this is an instance, only create flags for tracers
8036 * the instance may have.
8037 */
8038 if (!trace_ok_for_array(tracer, tr))
8039 return;
8040
8041 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008042 /* Make sure there's no duplicate flags. */
8043 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008044 return;
8045 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008046
8047 opts = flags->opts;
8048
8049 for (cnt = 0; opts[cnt].name; cnt++)
8050 ;
8051
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008052 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008053 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008054 return;
8055
8056 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8057 GFP_KERNEL);
8058 if (!tr_topts) {
8059 kfree(topts);
8060 return;
8061 }
8062
8063 tr->topts = tr_topts;
8064 tr->topts[tr->nr_topts].tracer = tracer;
8065 tr->topts[tr->nr_topts].topts = topts;
8066 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008067
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008068 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008069 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008070 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008071 WARN_ONCE(topts[cnt].entry == NULL,
8072 "Failed to create trace option: %s",
8073 opts[cnt].name);
8074 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008075}
8076
Steven Rostedta8259072009-02-26 22:19:12 -05008077static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008078create_trace_option_core_file(struct trace_array *tr,
8079 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008080{
8081 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008082
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008083 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008084 if (!t_options)
8085 return NULL;
8086
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008087 return trace_create_file(option, 0644, t_options,
8088 (void *)&tr->trace_flags_index[index],
8089 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008090}
8091
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008092static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008093{
8094 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008095 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008096 int i;
8097
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008098 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008099 if (!t_options)
8100 return;
8101
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008102 for (i = 0; trace_options[i]; i++) {
8103 if (top_level ||
8104 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8105 create_trace_option_core_file(tr, trace_options[i], i);
8106 }
Steven Rostedta8259072009-02-26 22:19:12 -05008107}
8108
Steven Rostedt499e5472012-02-22 15:50:28 -05008109static ssize_t
8110rb_simple_read(struct file *filp, char __user *ubuf,
8111 size_t cnt, loff_t *ppos)
8112{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008113 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008114 char buf[64];
8115 int r;
8116
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008117 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008118 r = sprintf(buf, "%d\n", r);
8119
8120 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8121}
8122
8123static ssize_t
8124rb_simple_write(struct file *filp, const char __user *ubuf,
8125 size_t cnt, loff_t *ppos)
8126{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008127 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008128 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008129 unsigned long val;
8130 int ret;
8131
8132 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8133 if (ret)
8134 return ret;
8135
8136 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008137 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008138 if (!!val == tracer_tracing_is_on(tr)) {
8139 val = 0; /* do nothing */
8140 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008141 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008142 if (tr->current_trace->start)
8143 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008144 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008145 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008146 if (tr->current_trace->stop)
8147 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008148 }
8149 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008150 }
8151
8152 (*ppos)++;
8153
8154 return cnt;
8155}
8156
8157static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008158 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008159 .read = rb_simple_read,
8160 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008161 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008162 .llseek = default_llseek,
8163};
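
/*
 * The rb_simple_fops above back the "tracing_on" file created in
 * init_tracer_tracefs(). A typical use is bracketing a workload
 * (paths assume the usual tracefs mount point):
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on
 *	# echo 1 > /sys/kernel/tracing/tracing_on
 *
 * Writing a value that matches the current state is a no-op, so the
 * tracer's start()/stop() callbacks only run on real transitions.
 */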

static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
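
/*
 * "buffer_percent" sets how full the per-cpu ring buffer must be
 * before a reader blocked in tracing_buffers_splice_read() (via the
 * wait_on_pipe() call above) is woken up. For example, a value of 50
 * wakes waiters once the buffer is half full. buffer_percent_write()
 * rejects values above 100 and bumps a write of 0 up to 1.
 */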

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
8272
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008273static void free_trace_buffer(struct trace_buffer *buf)
8274{
8275 if (buf->buffer) {
8276 ring_buffer_free(buf->buffer);
8277 buf->buffer = NULL;
8278 free_percpu(buf->data);
8279 buf->data = NULL;
8280 }
8281}
8282
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008283static void free_trace_buffers(struct trace_array *tr)
8284{
8285 if (!tr)
8286 return;
8287
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008288 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008289
8290#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008291 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008292#endif
8293}
8294
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008295static void init_trace_flags_index(struct trace_array *tr)
8296{
8297 int i;
8298
8299 /* Used by the trace options files */
8300 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8301 tr->trace_flags_index[i] = i;
8302}
8303
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008304static void __update_tracer_options(struct trace_array *tr)
8305{
8306 struct tracer *t;
8307
8308 for (t = trace_types; t; t = t->next)
8309 add_tracer_options(tr, t);
8310}
8311
8312static void update_tracer_options(struct trace_array *tr)
8313{
8314 mutex_lock(&trace_types_lock);
8315 __update_tracer_options(tr);
8316 mutex_unlock(&trace_types_lock);
8317}
8318
Divya Indif45d1222019-03-20 11:28:51 -07008319struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008320{
Steven Rostedt277ba042012-08-03 16:10:49 -04008321 struct trace_array *tr;
8322 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008323
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008324 mutex_lock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008325 mutex_lock(&trace_types_lock);
8326
8327 ret = -EEXIST;
8328 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8329 if (tr->name && strcmp(tr->name, name) == 0)
8330 goto out_unlock;
8331 }
8332
8333 ret = -ENOMEM;
8334 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8335 if (!tr)
8336 goto out_unlock;
8337
8338 tr->name = kstrdup(name, GFP_KERNEL);
8339 if (!tr->name)
8340 goto out_free_tr;
8341
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008342 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8343 goto out_free_tr;
8344
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008345 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008346
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008347 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8348
Steven Rostedt277ba042012-08-03 16:10:49 -04008349 raw_spin_lock_init(&tr->start_lock);
8350
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008351 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8352
Steven Rostedt277ba042012-08-03 16:10:49 -04008353 tr->current_trace = &nop_trace;
8354
8355 INIT_LIST_HEAD(&tr->systems);
8356 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008357 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008358 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008359
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008360 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008361 goto out_free_tr;
8362
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008363 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008364 if (!tr->dir)
8365 goto out_free_tr;
8366
8367 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008368 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008369 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008370 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008371 }
Steven Rostedt277ba042012-08-03 16:10:49 -04008372
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008373 ftrace_init_trace_array(tr);
8374
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008375 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008376 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008377 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008378
8379 list_add(&tr->list, &ftrace_trace_arrays);
8380
8381 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008382 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008383
Divya Indif45d1222019-03-20 11:28:51 -07008384 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04008385
8386 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008387 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008388 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008389 kfree(tr->name);
8390 kfree(tr);
8391
8392 out_unlock:
8393 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008394 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008395
Divya Indif45d1222019-03-20 11:28:51 -07008396 return ERR_PTR(ret);
8397}
8398EXPORT_SYMBOL_GPL(trace_array_create);
Steven Rostedt277ba042012-08-03 16:10:49 -04008399
Divya Indif45d1222019-03-20 11:28:51 -07008400static int instance_mkdir(const char *name)
8401{
8402 return PTR_ERR_OR_ZERO(trace_array_create(name));
Steven Rostedt277ba042012-08-03 16:10:49 -04008403}
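
/*
 * Instances can be created either from the kernel, through the
 * exported trace_array_create()/trace_array_destroy() pair, or from
 * userspace through the instances directory wired up below (paths
 * assume the usual tracefs mount point):
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo 1 > /sys/kernel/tracing/instances/foo/events/sched/enable
 *	# rmdir /sys/kernel/tracing/instances/foo
 *
 * The mkdir ends up in instance_mkdir() and the rmdir in
 * instance_rmdir(); the latter fails with -EBUSY while the instance
 * still holds references.
 */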
8404
Divya Indif45d1222019-03-20 11:28:51 -07008405static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008406{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008407 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008408
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05008409 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Divya Indif45d1222019-03-20 11:28:51 -07008410 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008411
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008412 list_del(&tr->list);
8413
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008414 /* Disable all the flags that were enabled coming in */
8415 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8416 if ((1 << i) & ZEROED_TRACE_FLAGS)
8417 set_tracer_flag(tr, 1 << i, 0);
8418 }
8419
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008420 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308421 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008422 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008423 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008424 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08008425 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008426 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008427
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008428 for (i = 0; i < tr->nr_topts; i++) {
8429 kfree(tr->topts[i].topts);
8430 }
8431 kfree(tr->topts);
8432
Chunyu Hudb9108e02017-07-20 18:36:09 +08008433 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008434 kfree(tr->name);
8435 kfree(tr);
Divya Indif45d1222019-03-20 11:28:51 -07008436 tr = NULL;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008437
Divya Indif45d1222019-03-20 11:28:51 -07008438 return 0;
8439}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008440
Divya Indif45d1222019-03-20 11:28:51 -07008441int trace_array_destroy(struct trace_array *tr)
8442{
8443 int ret;
8444
8445 if (!tr)
8446 return -EINVAL;
8447
8448 mutex_lock(&event_mutex);
8449 mutex_lock(&trace_types_lock);
8450
8451 ret = __remove_instance(tr);
8452
8453 mutex_unlock(&trace_types_lock);
8454 mutex_unlock(&event_mutex);
8455
8456 return ret;
8457}
8458EXPORT_SYMBOL_GPL(trace_array_destroy);
8459
8460static int instance_rmdir(const char *name)
8461{
8462 struct trace_array *tr;
8463 int ret;
8464
8465 mutex_lock(&event_mutex);
8466 mutex_lock(&trace_types_lock);
8467
8468 ret = -ENODEV;
8469 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8470 if (tr->name && strcmp(tr->name, name) == 0) {
8471 ret = __remove_instance(tr);
8472 break;
8473 }
8474 }
8475
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008476 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008477 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008478
8479 return ret;
8480}
8481
Steven Rostedt277ba042012-08-03 16:10:49 -04008482static __init void create_trace_instances(struct dentry *d_tracer)
8483{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008484 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8485 instance_mkdir,
8486 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008487 if (WARN_ON(!trace_instance_dir))
8488 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04008489}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
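
/*
 * Illustrative only: the files created above are the per-instance
 * control interface. A typical session against the top level
 * instance looks like:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo 1 > /sys/kernel/tracing/tracing_on
 *	echo hello > /sys/kernel/tracing/trace_marker
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * The same file names appear under instances/<name>/ for every
 * instance handed to init_tracer_tracefs().
 */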

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return NULL;
}
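
/*
 * Net effect (assuming both tracefs and debugfs are enabled): the same
 * files are reachable at two paths,
 *
 *	/sys/kernel/tracing		- the tracefs mount
 *	/sys/kernel/debug/tracing	- the debugfs automount point
 *
 * where stepping into the second path triggers trace_automount() and
 * mounts tracefs there on demand.
 */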

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
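
/*
 * The eval maps walked above come from TRACE_DEFINE_ENUM() (and
 * TRACE_DEFINE_SIZEOF()) in trace event headers. A minimal sketch,
 * with a hypothetical enum value MY_STATE_RUNNING:
 *
 *	TRACE_DEFINE_ENUM(MY_STATE_RUNNING);
 *
 * Each such definition lands an entry in the __start/__stop section
 * range, so the name can be resolved to its numeric value in the
 * event format exposed to user space.
 */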

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
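
/*
 * Both handlers only act when ftrace_dump_on_oops is set, which is
 * normally done with the "ftrace_dump_on_oops" kernel command line
 * option or, at run time, through the corresponding sysctl:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * so an oops or panic spills the ring buffer to the console.
 */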

/*
 * printk is limited to 1024 characters anyway; we really don't need
 * it that big. Nothing should be printing 1000 characters here.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
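
/*
 * Sketch of in-kernel usage (hypothetical caller): since the symbol is
 * exported, a module that hits an unrecoverable state can dump the
 * trace buffer before giving up:
 *
 *	if (WARN_ON(device_is_wedged(dev)))
 *		ftrace_dump(DUMP_ORIG);	   // this CPU's buffer only
 *
 * DUMP_ALL dumps every CPU. Note that dumping calls tracing_off(),
 * so tracing stays disabled until re-enabled via tracing_on.
 */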

int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
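
/*
 * The createfn callback gets the line already split into words. A
 * minimal conforming callback (hypothetical, for illustration):
 *
 *	static int my_createfn(int argc, char **argv)
 *	{
 *		if (argc < 1)
 *			return -EINVAL;
 *		pr_info("cmd: %s\n", argv[0]);
 *		return 0;
 *	}
 *
 * A non-zero return aborts the surrounding parse loop.
 */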

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
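
/*
 * This is the write-side helper for command style tracefs files. A
 * probe file's write handler can delegate to it wholesale; roughly how
 * the kprobe_events file is wired up (paraphrased):
 *
 *	static ssize_t probes_write(struct file *file,
 *				    const char __user *buffer,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       create_trace_kprobe);
 *	}
 *
 * Each newline-terminated line then becomes one createfn invocation,
 * with '#' comments stripped first.
 */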

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is stored in an init section.
	 * This function is called as a late initcall. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif