/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF = (1 << 1),
	TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

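/*
 * trace_type selects which events the active tracer variant times.
 * It is set once at init time: irqsoff sets TRACER_IRQS_OFF,
 * preemptoff sets TRACER_PREEMPT_OFF, and the combined preemptirqsoff
 * tracer sets both bits (see the *_tracer_init() functions below).
 */
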
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
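
/*
 * How the two predicates combine in the hooks below: the hardirq
 * on/off hooks only act when !preempt_trace(). If preempt tracing is
 * also enabled and preemption is already disabled, the critical
 * section is already being timed by the preempt hooks, so an irq
 * disable/enable inside it must not restart or end the measurement.
 */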

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a new maximum and could disturb our measurement with serial
 * console printouts, etc. Truly coinciding maximum latencies should be
 * rare and what happens together happens separately as well, so this
 * doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

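	/*
	 * data->disabled is a per-cpu nesting counter: only the
	 * outermost user (inc result == 1) may record an entry, which
	 * keeps the tracer from recursing into itself.
	 */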
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
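
/*
 * In other words: with tracing_thresh set, every critical section at
 * least tracing_thresh long is reported; with it unset, only a section
 * that beats the current tracing_max_latency is.
 */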

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

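	/*
	 * Classic check/lock/re-check: report_latency() was tested
	 * lockless above to keep the common case cheap, and is tested
	 * again under max_trace_lock in case another cpu recorded a
	 * bigger maximum in the meantime.
	 */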
	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(tr, cpu);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
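	/* gcc's "?:" shorthand: record parent_ip when non-zero, else ip */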
	data->critical_start = parent_ip ? : ip;
	tracing_reset(tr, cpu);

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude known-safe sections (e.g. idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
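
/*
 * Illustrative only - the real call sites live in the arch idle loops,
 * not here: the idle code brackets its wait with these helpers so that
 * time spent halted with irqs off is not reported as a latency:
 *
 *	stop_critical_timings();
 *	safe_halt();		// or the arch's idle routine
 *	start_critical_timings();
 */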

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
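/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, lockdep already hooks
 * every hardirq enable/disable and forwards to time_hardirqs_on/off(),
 * so those are all we provide. Without lockdep, the trace_hardirqs_*()
 * entry points below are ours to implement.
 */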
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
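
/*
 * trace_preempt_on/off are driven from the preempt_count() bookkeeping
 * in the scheduler (add/sub_preempt_count() in this era, if memory
 * serves) on the zero <-> nonzero transitions, so a preempt-off
 * section is timed from the first disable to the final re-enable.
 */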

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}
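
/*
 * tr->ctrl mirrors the debugfs "tracing_enabled" switch: ctrl_update
 * is invoked when it is toggled, and init/reset honour its current
 * value so the tracer only runs while tracing is globally on.
 */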

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name = "irqsoff",
	.init = irqsoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name = "preemptoff",
	.init = preemptoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name = "preemptirqsoff",
	.init = preemptirqsoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
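
/*
 * Typical usage from userspace (paths assume debugfs is mounted at
 * /sys/kernel/debug; older setups often used a bare /debug mount):
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/latency_trace
 *
 * "tracing_max_latency" holds the worst latency seen so far (in usecs)
 * and can be written to reset the watermark.
 */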