/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * available TYPE space, raw is the max value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow profiling them
 * as well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};

#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)
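
/*
 * Illustrative example (not part of the ABI itself): a config word for a
 * generalized event is composed from the TYPE and EVENT fields above,
 * while raw, CPU-specific configurations set the MSB instead
 * (raw_pmu_bits below stands in for hardware-specific encoding):
 *
 *	u64 config;
 *
 *	config = ((u64)PERF_TYPE_SOFTWARE << PERF_COUNTER_TYPE_SHIFT) |
 *		 (PERF_COUNT_CPU_CLOCK    << PERF_COUNTER_EVENT_SHIFT);
 *
 *	config = PERF_COUNTER_RAW_MASK | raw_pmu_bits;
 */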

/*
 * Bits that can be set in hw_event.record_type to request information
 * in the overflow packets.
 */
enum perf_counter_record_format {
	PERF_RECORD_IP		= 1U << 0,
	PERF_RECORD_TID		= 1U << 1,
	PERF_RECORD_GROUP	= 1U << 2,
	PERF_RECORD_CALLCHAIN	= 1U << 3,
	PERF_RECORD_TIME	= 1U << 4,
};

/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 2,
};
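
/*
 * For example (a sketch, not a normative layout definition): with both
 * format bits set, a read() on a counter fd returns three u64 values,
 * in order:
 *
 *	u64 buf[3];
 *
 *	read(fd, buf, sizeof(buf));
 *	count        = buf[0];	(the counter value)
 *	time_enabled = buf[1];	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	time_running = buf[2];	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 */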

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	/*
	 * The MSB of the config word signals whether the rest contains
	 * CPU-specific (raw) counter configuration data; if unset, the
	 * next 7 bits are an event type and the remaining bits are the
	 * event identifier.
	 */
	__u64			config;

	__u64			irq_period;
	__u32			record_type;
	__u32			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap	       :  1, /* include mmap data     */
				munmap	       :  1, /* include munmap data   */

				__reserved_1   : 53;

	__u32			extra_config_len;
	__u32			wakeup_events;	/* wakeup every n events */

	__u64			__reserved_2;
	__u64			__reserved_3;
};
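
/*
 * A minimal usage sketch (illustrative; it assumes the syscall takes the
 * hw_event pointer plus pid, cpu, group_fd and flags arguments, and that
 * a syscall number is wired up as __NR_perf_counter_open):
 *
 *	struct perf_counter_hw_event hw_event = { 0 };
 *	int fd;
 *
 *	hw_event.config      = PERF_COUNT_INSTRUCTIONS; (type 0: hardware)
 *	hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
 *	hw_event.disabled    = 1;			(start it via ioctl)
 *
 *	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
 */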

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
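
/*
 * E.g. (sketch): a counter opened with hw_event.disabled = 1 can later
 * be started and stopped from user-space:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 */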

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier();
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space should issue an rmb(), on SMP-capable platforms,
	 * after reading this value -- see perf_counter_wakeup().
	 */
	__u32   data_head;		/* head in the data section */
};
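
/*
 * For example (a sketch): a consumer of the mmap()ed data buffer would
 * sample the head as follows before reading newly arrived events:
 *
 *	head = pc->data_head;
 *	rmb();
 *	... read events in [tail, head) from the data pages ...
 *	tail = head;
 */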

struct perf_event_header {
	__u32	type;
	__u32	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,
	PERF_EVENT_MUNMAP		= 2,

	/*
	 * Half the event type space is reserved for the counter overflow
	 * bitfields, as found in hw_event.record_type.
	 *
	 * These events will have types of the form:
	 *   PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } *
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && __PERF_EVENT_IP
	 *	{ u32			pid, tid; } && __PERF_EVENT_TID
	 *
	 *	{ u64			nr;
	 *	  { u64 event, val; }	cnt[nr];  } && __PERF_EVENT_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && __PERF_EVENT_CALLCHAIN
	 *
	 *	{ u64			time;	  } && __PERF_EVENT_TIME
	 * };
	 */
	PERF_EVENT_COUNTER_OVERFLOW	= 1UL << 31,
	__PERF_EVENT_IP			= PERF_RECORD_IP,
	__PERF_EVENT_TID		= PERF_RECORD_TID,
	__PERF_EVENT_GROUP		= PERF_RECORD_GROUP,
	__PERF_EVENT_CALLCHAIN		= PERF_RECORD_CALLCHAIN,
	__PERF_EVENT_TIME		= PERF_RECORD_TIME,
};
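
/*
 * A sketch of how a consumer might walk the event stream using the
 * header (illustrative only; buf and len describe the mmap()ed data
 * area):
 *
 *	struct perf_event_header *hdr;
 *	void *p = buf;
 *
 *	while (p < buf + len) {
 *		hdr = p;
 *		if (hdr->type & PERF_EVENT_COUNTER_OVERFLOW)
 *			... decode fields selected by hdr->type bits ...
 *		else if (hdr->type == PERF_EVENT_MMAP)
 *			... track the new executable mapping ...
 *		p += hdr->size;
 *	}
 */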

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64				config;
			unsigned long			config_base;
			unsigned long			counter_base;
			int				nmi;
			unsigned int			idx;
		};
		union { /* software */
			atomic64_t			count;
			struct hrtimer			hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				irq_period;
	atomic64_t			period_left;
#endif
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
};
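
/*
 * A sketch of a pure software counter backing these ops (the handler
 * names are for illustration; real instances live alongside the core
 * perf counter code):
 *
 *	static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
 *		.enable		= cpu_clock_perf_counter_enable,
 *		.disable	= cpu_clock_perf_counter_disable,
 *		.read		= cpu_clock_perf_counter_read,
 *	};
 */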

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;
	atomic_t			wakeup;
	atomic_t			head;
	atomic_t			events;
	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	/*
	 * These are the total times, in nanoseconds, that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * These accumulate the total time (in nanoseconds) that child
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;

	/*
	 * time_now is the current time in nanoseconds since an arbitrary
	 * point in the past. For per-task counters, this is based on the
	 * task clock, and for per-cpu counters it is based on the cpu clock.
	 * time_lost is an offset from the task/cpu clock, used to make it
	 * appear that time only passes while the context is scheduled in.
	 */
	u64			time_now;
	u64			time_lost;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !perf_event_raw(&counter->hw_event) &&
		perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);

extern void perf_counter_mmap(unsigned long addr, unsigned long len,
			      unsigned long pgoff, struct file *file);

extern void perf_counter_munmap(unsigned long addr, unsigned long len,
				unsigned long pgoff, struct file *file);

#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)	{ }

static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
		  unsigned long pgoff, struct file *file)		{ }

static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
		    unsigned long pgoff, struct file *file)		{ }

#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */