/*
 *  Performance counters:
 *
 *    Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * Available TYPE space; RAW is the maximum value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow profiling them
 * as well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		= 0,
	PERF_RECORD_IRQ			= 1,
	PERF_RECORD_GROUP		= 2,
};

#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)

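/*
 * Illustrative sketch (not part of the ABI): how a config word is
 * composed with the masks above. For a generalized event, the type
 * occupies bits 56-62 and the event id bits 0-55; setting bit 63
 * instead marks the low 63 bits as raw, CPU-specific configuration
 * data (vendor_code below is a hypothetical raw event encoding):
 *
 *	__u64 config, raw;
 *
 *	config = (((__u64)PERF_TYPE_HARDWARE << PERF_COUNTER_TYPE_SHIFT) &
 *		  PERF_COUNTER_TYPE_MASK) |
 *		 (PERF_COUNT_INSTRUCTIONS & PERF_COUNTER_EVENT_MASK);
 *
 *	raw = PERF_COUNTER_RAW_MASK | (vendor_code & PERF_COUNTER_CONFIG_MASK);
 */
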
/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter return the indicated quantities, in increasing
 * order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 2,
};

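/*
 * Illustrative sketch: with both format bits set, a read() on the
 * counter fd returns three u64 values, the counter value first and
 * then the requested times in increasing order of bit value:
 *
 *	struct {
 *		__u64	value;
 *		__u64	time_enabled;	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *		__u64	time_running;	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	} res;
 *
 *	if (read(fd, &res, sizeof(res)) != sizeof(res))
 *		... error ...
 */
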
/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	/*
	 * The MSB of the config word signifies whether the rest contains
	 * CPU-specific (raw) counter configuration data; if unset, the
	 * next 7 bits are an event type and the remaining bits are the
	 * event identifier.
	 */
	__u64			config;

	__u64			irq_period;
	__u64			record_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				include_tid    :  1, /* include the tid       */

				__reserved_1   : 54;

	__u32			extra_config_len;
	__u32			__reserved_4;

	__u64			__reserved_2;
	__u64			__reserved_3;
};

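/*
 * Illustrative sketch (user-space, assuming the five-argument
 * sys_perf_counter_open(hw_event_uptr, pid, cpu, group_fd, flags)
 * convention): count user-space cycles on the calling task, starting
 * disabled so the counter can be enabled via ioctl later:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.config		= ((__u64)PERF_TYPE_HARDWARE <<
 *				   PERF_COUNTER_TYPE_SHIFT) |
 *				  PERF_COUNT_CPU_CYCLES,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *
 *	fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
 */
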
/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)

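/*
 * Illustrative sketch: both ioctls take no argument; a typical
 * enable/measure/disable cycle on a counter fd looks like:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	... run the workload ...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 *	read(fd, &count, sizeof(count));
 */
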
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	__u32	data_head;		/* head in the data section */
};

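/*
 * Illustrative sketch (user-space): the lock field is a seqlock, so a
 * consistent index/offset snapshot is taken by re-reading until the
 * sequence count is stable; barrier() here stands in for a suitable
 * read memory barrier, and the page comes from mmap()ing the fd:
 *
 *	struct perf_counter_mmap_page *pg;
 *	__u32 seq, idx;
 *	__s64 off;
 *
 *	pg = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd, 0);
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx = pg->index;
 *		off = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq);
 *
 * The final count is the hardware counter value for idx plus off.
 */
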
struct perf_event_header {
	__u32	type;
	__u32	size;
};

enum perf_event_type {
	PERF_EVENT_IP		= 0,
	PERF_EVENT_GROUP	= 1,

	__PERF_EVENT_TID	= 0x100,
};

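/*
 * Illustrative sketch (assumptions: data is the start of the mmap'ed
 * data section and head a snapshot of data_head): records are laid out
 * back to back, each starting with a perf_event_header whose size field
 * gives the full record length; __PERF_EVENT_TID is or-ed into type when
 * the counter was opened with include_tid set:
 *
 *	struct perf_event_header *hdr;
 *	__u64 pos = 0;
 *
 *	while (pos < head) {
 *		hdr = (struct perf_event_header *)(data + pos);
 *		switch (hdr->type & ~__PERF_EVENT_TID) {
 *		case PERF_EVENT_IP:
 *		case PERF_EVENT_GROUP:
 *			... decode record body ...
 *		}
 *		pos += hdr->size;
 *	}
 */
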
#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		nmi;
			unsigned int	idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				irq_period;
	atomic64_t			period_left;
#endif
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int (*enable)		(struct perf_counter *counter);
	void (*disable)		(struct perf_counter *counter);
	void (*read)		(struct perf_counter *counter);
};

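/*
 * Illustrative sketch (kernel-internal): a counter provider fills in
 * one of these and returns it from hw_perf_counter_init(); a software
 * clock counter, say, might look like the following (function names
 * are hypothetical, bodies omitted):
 *
 *	static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
 *		.enable		= cpu_clock_perf_counter_enable,
 *		.disable	= cpu_clock_perf_counter_disable,
 *		.read		= cpu_clock_perf_counter_read,
 *	};
 */
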
/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;
	atomic_t			wakeup;
	atomic_t			head;
	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;

	/*
	 * time_now is the current time in nanoseconds since an arbitrary
	 * point in the past. For per-task counters, this is based on the
	 * task clock, and for per-cpu counters it is based on the cpu clock.
	 * time_lost is an offset from the task/cpu clock, used to make it
	 * appear that time only passes while the context is scheduled in.
	 */
	u64			time_now;
	u64			time_lost;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !perf_event_raw(&counter->hw_event) &&
		perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);

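/*
 * Illustrative sketch: kernel code reports a software event by calling
 * perf_swcounter_event() at the point where it occurs, passing the
 * event id, the count to add, the nmi flag and the register state,
 * e.g. (placement hypothetical) on a context switch:
 *
 *	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 0, regs);
 */
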
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void perf_swcounter_event(u32 event, u64 nr,
					int nmi, struct pt_regs *regs)	{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */