/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

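/*
 * Typical usage from user space goes through tracefs (paths assume the
 * standard tracefs mount point; adjust if it is mounted elsewhere):
 *
 *	echo irqsoff > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/tracing_max_latency
 *	cat /sys/kernel/tracing/trace
 *
 * The preemptoff and preemptirqsoff tracers registered below are driven
 * the same way.
 */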
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

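/* Set while a critical (irqs/preempt off) section is being timed on this CPU */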
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
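/*
 * Switching the display-graph flag restarts the tracer: per-cpu state and
 * the recorded max latency are cleared so graph and plain function samples
 * never mix in the same trace.
 */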
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

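/*
 * Record either a graph event or a plain function event, depending on
 * whether the display-graph flag is set for this trace array.
 */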
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

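/*
 * Called when a critical section ends: compute how long irqs/preemption
 * were disabled and, if this beats the current max (or exceeds
 * tracing_thresh), record the snapshot under max_trace_lock.
 */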
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

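/*
 * Mark the start of a critical section on this CPU: remember the timestamp
 * and the address that disabled irqs/preemption, and flag the CPU as
 * currently being timed via tracing_cpu.
 */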
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

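/*
 * Mark the end of a critical section: clear tracing_cpu and hand the
 * measured interval to check_critical_timing() to see whether it is a new
 * maximum worth recording.
 */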
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend measurement during stoppage (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

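/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_*() hooks and
 * forwards irq state changes here through time_hardirqs_on/off().  Without
 * it, this file provides the trace_hardirqs_*() entry points (and stub
 * softirq/irqtrace helpers) itself.
 */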
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
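/*
 * Preemption counterpart: only time preempt-off sections when irqs are not
 * already being traced, so the combined preemptirqsoff variant does not
 * account the same interval twice.
 */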
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

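/*
 * Function (or function-graph) tracing is only hooked up while the
 * "function" trace flag is in effect; function_enabled remembers whether
 * the callbacks are currently registered.
 */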
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

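/*
 * Common setup shared by the irqsoff, preemptoff and preemptirqsoff
 * variants: the latency tracers need buffer overwrite and the latency
 * output format, and only one variant can own the max-latency machinery
 * at a time (hence irqsoff_busy).
 */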
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);