// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

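/*
 * trace_type selects which critical sections are measured: the irqsoff
 * tracer sets TRACER_IRQS_OFF, preemptoff sets TRACER_PREEMPT_OFF, and
 * preemptirqsoff sets both (see the *_tracer_init() functions below).
 */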
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to stop the measurement (in idle) */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
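
/*
 * Note: the idle path is the main user of the two exported functions
 * above; it calls stop_critical_timings() before entering an idle state
 * and start_critical_timings() afterwards, so that time spent idle is
 * not reported as an irqs-off latency.
 */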

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

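/*
 * Typical usage from tracefs, as a sketch (the mount point may differ):
 *
 *	echo irqsoff > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/tracing_max_latency
 *
 * "preemptoff" and "preemptirqsoff" are selected the same way when the
 * corresponding tracers are built in.
 */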
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */