#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_FN_RET,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *	bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char type;
	unsigned char cpu;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry ent;
	unsigned long ip;
	unsigned long parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry ent;
	unsigned long ip;
	unsigned long parent_ip;
	unsigned long long calltime;
	unsigned long long rettime;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry ent;
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
	unsigned char next_state;
	unsigned int next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry ent;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry ent;
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry ent;
	unsigned long ip;
	char buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char type;
	/* Temporary till we get rid of this completely */
	char buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry ent;
	struct mmiotrace_rw rw;
};

struct trace_mmiotrace_map {
	struct trace_entry ent;
	struct mmiotrace_map map;
};

struct trace_boot_call {
	struct trace_entry ent;
	struct boot_trace_call boot_call;
};

struct trace_boot_ret {
	struct trace_entry ent;
	struct boot_trace_ret boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry ent;
	unsigned line;
	char func[TRACE_FUNC_SIZE+1];
	char file[TRACE_FILE_SIZE+1];
	char correct;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 *  CONT	- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
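
/*
 * Illustrative sketch (not part of the original header): output code
 * can test these bits directly on a trace_entry's flags field, e.g.:
 *
 *	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	char irqs_off =
 *		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
 *		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
 *
 * The characters actually used in the latency format are decided in
 * trace.c; the above only shows how the bits are meant to be tested.
 */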

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t disabled;

	/* these fields get copied into max-trace: */
	unsigned long trace_idx;
	unsigned long overrun;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer *buffer;
	unsigned long entries;
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;
	struct trace_array_cpu *data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
		__ftrace_bad_type();					\
	} while (0)

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
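
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * a tracer's print_line callback typically maps iter->ent onto its
 * specific entry type with trace_assign_type() and reports whether it
 * produced the line:
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *		struct ftrace_entry *field;
 *
 *		if (entry->type != TRACE_FN)
 *			return TRACE_TYPE_UNHANDLED;
 *
 *		trace_assign_type(field, entry);
 *		if (!trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *				      field->ip, field->parent_ip))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *
 *		return TRACE_TYPE_HANDLED;
 *	}
 */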

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char *name; /* Will appear on the trace_options file */
	u32 bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32 val;
	struct tracer_opt *opts;
};

/* Makes it easier to define a tracer option */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
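
/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a tracer describes its private options with a zero-terminated
 * tracer_opt array and gives the flags an initial value:
 *
 *	#define TRACE_MYOPT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(myopt, TRACE_MYOPT) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = TRACE_MYOPT,
 *		.opts = my_opts,
 *	};
 *
 * The empty element terminates the array; the set_flag() callback in
 * struct tracer (below) is invoked when one of these options is toggled.
 */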

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	/* Your tracer should raise a warning if init fails */
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int (*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer *next;
	int print_max;
	struct tracer_flags *flags;
};
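
/*
 * Illustrative sketch (hypothetical tracer, not part of this header):
 * a minimal tracer fills in only the callbacks it needs, optionally
 * points .flags at a struct tracer_flags such as the my_flags sketch
 * above, and registers itself with register_tracer(), declared
 * further down in this file:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *		.flags	= &my_flags,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */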

struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;
	unsigned int readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and which might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	void *private;
	struct ring_buffer_iter *buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;
	struct trace_entry *ent;
	int cpu;
	u64 ts;

	unsigned long iter_flags;
	loff_t pos;
	long idx;

	cpumask_t started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()	do { } while (0)
# define tracing_stop_function_trace()	do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
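
/*
 * Illustrative sketch (not part of the original header): output code
 * can pass the symbol-related subset of trace_flags straight to
 * seq_print_ip_sym(), declared above:
 *
 *	unsigned long sym_flags = trace_flags & TRACE_ITER_SYM_MASK;
 *
 *	seq_print_ip_sym(&iter->seq, field->ip, sym_flags);
 *
 * "iter" and "field" are hypothetical here; the real formatting
 * decisions live in trace.c.
 */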

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
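
/*
 * Illustrative sketch (hypothetical callback, not part of this header;
 * "tr" stands for the tracer's struct trace_array): probe functions
 * that may fire inside the scheduler pair the two helpers above
 * instead of plain preempt_disable()/preempt_enable():
 *
 *	static void my_trace_call(unsigned long ip, unsigned long parent_ip)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *		trace_function(tr, tr->data[smp_processor_id()], ip,
 *			       parent_ip, 0, preempt_count());
 *		ftrace_preempt_enable(resched);
 *	}
 *
 * trace_function() and its flags/pc arguments are declared earlier in
 * this file; the 0 passed for flags is only for illustration.
 */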

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */