// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

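/*
 * trace_type is a bitmask of the TRACER_*_OFF values above; the init
 * function of each tracer below sets it to the set of events that
 * tracer wants to time.
 */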
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
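
/*
 * The event handlers below combine these two checks: with only
 * TRACER_IRQS_OFF set, preempt_trace() is always false and the timing
 * follows just the irq state; with only TRACER_PREEMPT_OFF set, the
 * reverse holds; with both set (preemptirqsoff), timing starts when
 * either irqs or preemption get disabled and stops only once both are
 * enabled again, so the union of the two sections is measured.
 */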

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare,
 * and whatever happens together will also happen separately at some
 * point, so this doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue; data->disabled has been
 *           incremented and the caller must decrement it again
 *           once it is done tracing.
 *         0 if the trace is to be ignored; data->disabled is
 *           left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);

	trace_function(tr, ip, parent_ip, trace_ctx);

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	int ret;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);
	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
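
/*
 * Two reporting modes are visible through tracefs (paths below assume
 * it is mounted at /sys/kernel/tracing; values are in usecs): if a
 * threshold is set, every critical section at least that long is
 * reported, otherwise only a new maximum is recorded, e.g.:
 *
 *	# echo 0 > tracing_max_latency		(track a new maximum)
 *	# echo 100 > tracing_thresh		(or: report all >= 100 usecs)
 */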

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	trace_ctx = tracing_gen_ctx();
	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings; used to stop timing while in idle */
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
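
/*
 * A minimal sketch of the intended pairing around idle (the cpuidle
 * path does something very close to this), so that time spent halted
 * is not accounted as an irqs-off latency:
 *
 *	stop_critical_timings();
 *	arch_cpu_idle();		// halts with irqs still off
 *	start_critical_timings();
 */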

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
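
/*
 * TRACE_ITER_DISPLAY_GRAPH is the "display-graph" option in tracefs;
 * toggling it (echo 1 > options/display-graph while this tracer is
 * active) lands in irqsoff_display_graph() above, which restarts the
 * tracer and resets the recorded maximum.
 */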

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non-overwrite mode screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
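
/*
 * Note that nothing in this file calls the two hooks above directly;
 * they are invoked from the irqflags tracing glue (see
 * kernel/trace/trace_preemptirq.c) whenever hardirqs are enabled or
 * disabled.
 */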

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */