/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
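/*
 * Handle runtime flipping of the display-graph option: tear down the
 * current callbacks, clear the per-cpu tracing state and the max
 * latency, reset the ring buffers, then restart in the requested mode.
 */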
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

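/*
 * Function graph entry/return callbacks. Like irqsoff_tracer_call(),
 * they only record when this CPU is inside a critical section
 * (tracing_cpu is set) and interrupts really are disabled.
 */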
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return 0;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else
		ret = 0;

	atomic_dec(&data->disabled);
	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}

	atomic_dec(&data->disabled);
}

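/*
 * Iterator open/close: only the graph output needs private iterator
 * state, so defer to the function graph tracer when it is in use.
 */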
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

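/*
 * Record a function event in whichever output style is active:
 * a graph event when display-graph is set, a plain TRACE_FN otherwise.
 */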
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

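/*
 * Close a critical section: compute the time since preempt_timestamp
 * and, if it exceeds the configured threshold or the current maximum,
 * record the end of the section and snapshot the trace under
 * max_trace_lock.
 */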
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

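/*
 * Mark the start of an irqs-off/preempt-off section on this CPU:
 * stamp preempt_timestamp, remember where the section began and set
 * the per-cpu tracing_cpu flag so the tracer callbacks start logging.
 */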
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

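/*
 * Mark the end of the section: clear tracing_cpu, log the final event
 * and let check_critical_timing() decide whether this section is a
 * new maximum worth keeping.
 */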
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

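/*
 * Preempt-off hooks: trace_preempt_off() runs when preemption gets
 * disabled, trace_preempt_on() when it is about to be re-enabled.
 */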
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

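/*
 * Register either the plain function tracer callback or the function
 * graph callbacks, depending on the requested mode, and only mark the
 * tracer enabled if registration succeeded and tracing is on.
 */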
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

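/*
 * Common setup for the irqsoff, preemptoff and preemptirqsoff flavors:
 * force the latency output format, reset the max latency and the ring
 * buffers, then start the tracer in the currently selected mode.
 */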
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

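/*
 * Three tracer flavors share the machinery above: "irqsoff" times
 * irqs-off sections, "preemptoff" times preempt-off sections, and
 * "preemptirqsoff" times sections where either is disabled.
 */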
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);