#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is ultimately printed as a single line of trace output, such as:
 *
 *	bash-15816 [01] 235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char type;
	unsigned char cpu;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry ent;
	unsigned long ip;
	unsigned long parent_ip;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry ent;
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
	unsigned char next_state;
	unsigned int next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry ent;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES 8

struct stack_entry {
	struct trace_entry ent;
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry ent;
	unsigned long ip;
	char buf[];
};

#define TRACE_OLD_SIZE 88

struct trace_field_cont {
	unsigned char type;
	/* Temporary till we get rid of this completely */
	char buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry ent;
	struct mmiotrace_rw rw;
};

struct trace_mmiotrace_map {
	struct trace_entry ent;
	struct mmiotrace_map map;
};

struct trace_boot {
	struct trace_entry ent;
	struct boot_trace initcall;
};

/*
 * trace_flag_type is an enumeration that records the state of the
 * system when a trace entry is recorded. These are:
 *  IRQS_OFF       - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - a reschedule is requested
 *  HARDIRQ        - inside an interrupt handler
 *  SOFTIRQ        - inside a softirq handler
 *  CONT           - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
	TRACE_FLAG_NEED_RESCHED = 0x04,
	TRACE_FLAG_HARDIRQ = 0x08,
	TRACE_FLAG_SOFTIRQ = 0x10,
	TRACE_FLAG_CONT = 0x20,
};
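/*
 * Illustrative sketch only (not copied from the output code): a print
 * routine can decode these bits from entry->flags when building its
 * per-line state columns, e.g.
 *
 *	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
 *
 * The characters shown ('d', '.') are an assumed convention for the
 * example; the actual formatting lives in the output routines.
 */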

#define TRACE_BUF_SIZE 1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace):
 */
struct trace_array_cpu {
	atomic_t disabled;

	/* these fields get copied into max-trace: */
	unsigned long trace_idx;
	unsigned long overrun;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * It has on/off state as well:
 */
struct trace_array {
	struct ring_buffer *buffer;
	unsigned long entries;
	long ctrl;
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;
	struct trace_array_cpu *data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type() macro verifies that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		__ftrace_bad_type();					\
	} while (0)
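/*
 * Usage sketch (illustrative only; the variable names are hypothetical,
 * not taken from the print routines): an output routine that already
 * knows iter->ent->type is TRACE_FN can downcast safely with
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 *
 * A mismatched id triggers the WARN_ON() in IF_ASSIGN(), and a type
 * missing from the list above ends up calling __ftrace_bad_type(),
 * which has no definition and therefore breaks the build.
 */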

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE = 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED = 1,
	TRACE_TYPE_UNHANDLED = 2	/* Relay to other output functions */
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	void (*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	struct tracer *next;
	int print_max;
};
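/*
 * Illustrative sketch only (the "example" names are hypothetical, not
 * an existing tracer): a minimal tracer fills in the callbacks it
 * needs, leaves the rest NULL, and registers itself:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	register_tracer(&example_tracer);
 *
 * register_tracer() and unregister_tracer() are declared later in this
 * header; optional callbacks such as print_line or selftest may be
 * left NULL.
 */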

struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;
	unsigned int readpos;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	void *private;
	struct ring_buffer_iter *buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;
	struct trace_entry *ent;
	int cpu;
	u64 ts;

	unsigned long iter_flags;
	loff_t pos;
	long idx;
};

void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace() do { } while (0)
# define tracing_stop_function_trace() do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
	TRACE_ITER_RAW = 0x10,
	TRACE_ITER_HEX = 0x20,
	TRACE_ITER_BIN = 0x40,
	TRACE_ITER_BLOCK = 0x80,
	TRACE_ITER_STACKTRACE = 0x100,
	TRACE_ITER_SCHED_TREE = 0x200,
	TRACE_ITER_PRINTK = 0x400,
};
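/*
 * Illustrative sketch only: output code tests these bits in the global
 * trace_flags (declared above) to decide how to format a line, e.g.
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		trace_seq_printf(&iter->seq, " <%lx>", field->ip);
 *
 * The "field" variable is hypothetical here; the user-visible option
 * names live in the trace_options array in trace.c, which must stay in
 * sync with this enum.
 */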

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption, scheduler safe
 *
 * When tracing can happen inside the scheduler, there are cases
 * where the tracing might happen before the need_resched flag is
 * checked. If this happens and the tracer calls preempt_enable
 * (after a disable), a schedule might take place, causing an
 * infinite recursion.
 *
 * To prevent this, we read the need_resched flag before disabling
 * preemption. When we want to enable preemption, we check the flag;
 * if it is set, we call preempt_enable_no_resched. Otherwise, we
 * call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption, scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler-safe way to enable preemption without missing
 * any preemption checks. The disable saved the need_resched state.
 * If resched is set, then we were either inside an atomic section or
 * inside the scheduler (we would have already scheduled otherwise).
 * In this case, we do not want to call the normal preempt_enable,
 * but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
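/*
 * Typical usage sketch (illustrative only): the two helpers bracket a
 * tracing section the way a preempt_disable()/preempt_enable() pair
 * would, but without triggering a reschedule from inside the tracer:
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	... record the trace entry; must not sleep ...
 *	ftrace_preempt_enable(resched);
 */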

#endif /* _LINUX_KERNEL_TRACE_H */