// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
13#include <linux/kallsyms.h>
Steven Rostedt81d68a92008-05-12 21:20:42 +020014#include <linux/uaccess.h>
15#include <linux/module.h>
16#include <linux/ftrace.h>
Masami Hiramatsueeeb0802019-02-13 01:13:40 +090017#include <linux/kprobes.h>
Steven Rostedt81d68a92008-05-12 21:20:42 +020018
19#include "trace.h"
20
Joel Fernandesd5915812017-10-10 15:51:37 -070021#include <trace/events/preemptirq.h>
22
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
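/*
 * Flip between function and function-graph output when the
 * display-graph option changes: restart the tracer and clear the
 * per-CPU state and max latency left over from the previous mode.
 */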
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

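/* Emit either a graph event or a plain function event, per the option */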
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

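/*
 * A critical section just ended: compute how long irqs/preemption were
 * off and, if it is a new max (or crosses tracing_thresh), snapshot it.
 * max_trace_lock serializes CPUs that race to record a new max.
 */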
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

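/*
 * Start timing a critical section: stamp the start time and set the
 * per-CPU tracing_cpu flag so nested starts on this CPU are ignored
 * until stop_critical_timing() clears it.
 */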
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

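/*
 * End the critical section: clear the per-CPU flag and let
 * check_critical_timing() decide whether this is a new max latency.
 */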
static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

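/*
 * Hook this tracer's function callback (or the graph entry/return pair)
 * into ftrace so functions are logged while a critical section is timed.
 */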
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

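/*
 * React to runtime trace-option changes: the function-trace and
 * display-graph toggles are handled here; everything else falls
 * through to trace_keep_overwrite().
 */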
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

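/*
 * Common init for the irqsoff/preemptoff/preemptirqsoff variants; only
 * one of them can own the machinery at a time, hence irqsoff_busy.
 */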
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
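/*
 * We are only interested in preempt on/off events:
 */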
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

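/* Register whichever of the three tracer flavors were configured in */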
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */