/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

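/*
 * Set while this CPU is inside a critical section that is being timed;
 * it lets the function-trace callback and the start/stop paths cheaply
 * skip CPUs that are not currently tracing a section.
 */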
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

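/*
 * trace_type selects which events delimit a critical section for the
 * tracer variant that is currently registered: IRQs being disabled and
 * re-enabled, preemption being disabled and re-enabled, or both
 * (preemptirqsoff).
 */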
enum {
        TRACER_IRQS_OFF = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
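/*
 * With a non-zero tracing_thresh, every section longer than the
 * threshold is reported; otherwise only a section that exceeds the
 * current tracing_max_latency is.
 */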
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

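/*
 * Called when a critical section ends: compute how long IRQs and/or
 * preemption were disabled and, if the latency is worth reporting,
 * record this trace as the new maximum.
 */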
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
}

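/*
 * Mark the beginning of a critical section on this CPU: stamp the start
 * time and start address, reset this CPU's trace buffer, log the first
 * entry and flag the CPU as tracing.
 */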
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(tr, cpu);

        local_save_flags(flags);

        trace_function(tr, data, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

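/*
 * Mark the end of a critical section on this CPU: clear the tracing
 * flag, log the final entry and let check_critical_timing() decide
 * whether this section is a new maximum latency.
 */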
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, data, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings; used to stop timing while in idle */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

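/*
 * IRQs-off hooks.  With CONFIG_PROVE_LOCKING, the lockdep code provides
 * the trace_hardirqs_*() entry points and this file only supplies
 * time_hardirqs_on/off(); without it, this file provides the
 * trace_hardirqs_*() functions (and empty stubs for the other
 * irq-trace helpers) itself.
 */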
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

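/*
 * Preemption hooks, called by the preemption code: disabling preemption
 * starts the timing, re-enabling it stops the timing.
 */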
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it while opening a trace output file.
 */
static int save_tracer_enabled;

static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        if (tracing_is_enabled()) {
                tracer_enabled = 1;
                save_tracer_enabled = 1;
        } else {
                tracer_enabled = 0;
                save_tracer_enabled = 0;
        }
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
        save_tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
        save_tracer_enabled = 0;
}

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        tracer_enabled = 0;
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
        /* restart tracing */
        tracer_enabled = save_tracer_enabled;
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name = "irqsoff",
        .init = irqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_irqsoff,
#endif
};
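/*
 * register_irqsoff() (and the other register_* helpers below) expand to
 * a real register_tracer() call only when the corresponding tracer is
 * configured; otherwise they are no-ops, so init_irqsoff_tracer() can
 * call all of them unconditionally.
 */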
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name = "preemptoff",
        .init = preemptoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name = "preemptirqsoff",
        .init = preemptirqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);