/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and software events of the
	 * kernel (and allow them to be profiled as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	/*
	 * Future software events:
	 */
	/* PERF_COUNT_PAGE_FAULTS	= -3,
	   PERF_COUNT_CONTEXT_SWITCHES	= -4, */
};
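
/*
 * Illustrative sketch (not part of the original header): the sign of
 * hw_event.type distinguishes the two kinds of counters above; a
 * negative value selects a kernel software counter, a non-negative
 * value a generalized hardware event. A hypothetical helper:
 */
#if 0
static inline int hw_event_is_software(s64 type)
{
	/* Software counters use negative type values: */
	return type < 0;
}
#endif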

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	s64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default */
				nmi	     :  1, /* NMI sampling   */
				raw	     :  1, /* raw event type */
				__reserved_1 : 29;

	u64			__reserved_2;
};
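
/*
 * Illustrative sketch (not part of the original header): how user-space
 * might fill in this ABI structure before passing it to the
 * sys_perf_counter_open() syscall. The values are hypothetical; here a
 * cycle counter is requested that starts off disabled and queues an
 * IRQ sample record every 100000 events:
 */
#if 0
static const struct perf_counter_hw_event example_hw_event = {
	.type		= PERF_COUNT_CYCLES,
	.irq_period	= 100000,
	.record_type	= PERF_RECORD_IRQ,
	.disabled	= 1,	/* enabled explicitly, later */
};
#endif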

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	u64			prev_count;
	u64			irq_period;
	s32			next_count;
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling buffer
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};
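
/*
 * Illustrative sketch (not part of the original header), assuming that
 * len counts the bytes queued so far and rd_idx the bytes already
 * consumed; a reader would drain data[rd_idx..len) and consult overrun
 * to detect lost records. Hypothetical helper:
 */
#if 0
static inline int perf_data_pending(const struct perf_data *pd)
{
	/* Bytes queued but not yet read: */
	return pd->len - pd->rd_idx;
}
#endif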

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
	void (*hw_perf_counter_read)	(struct perf_counter *counter);
};
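
/*
 * Illustrative sketch (not part of the original header): an
 * architecture backend provides one of these ops tables and returns it
 * from hw_perf_counter_init(). Hypothetical skeleton:
 */
#if 0
static void example_counter_enable(struct perf_counter *counter)
{
	/* program and start the hardware counter */
}

static void example_counter_disable(struct perf_counter *counter)
{
	/* stop the hardware counter */
}

static void example_counter_read(struct perf_counter *counter)
{
	/* fold the current hardware value into the counter's count */
}

static const struct hw_perf_counter_ops example_hw_ops = {
	.hw_perf_counter_enable		= example_counter_enable,
	.hw_perf_counter_disable	= example_counter_disable,
	.hw_perf_counter_read		= example_counter_read,
};
#endif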

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
#if BITS_PER_LONG == 64
	atomic64_t			count;
#else
	atomic_t			count32[2];
#endif
	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
};
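
/*
 * Illustrative sketch (not part of the original header): because the
 * count is an atomic64_t on 64-bit kernels but a pair of 32-bit
 * atomics otherwise, callers go through the atomic64_counter_read()/
 * atomic64_counter_set() accessors declared further down. Hedged usage
 * example:
 */
#if 0
static void example_reset_counter(struct perf_counter *counter)
{
	u64 old = atomic64_counter_read(counter);

	/* Restart counting from zero, whatever the word size: */
	atomic64_counter_set(counter, 0);
	(void)old;
}
#endif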

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for both task counters and CPU counters:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per-CPU counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
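
/*
 * Illustrative sketch (not part of the original header): the scheduler
 * is expected to bracket a context switch with the hooks above.
 * Hypothetical call site, assuming prev/next task pointers and the
 * current CPU number:
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next, int cpu)
{
	perf_counter_task_sched_out(prev, cpu);
	/* ... the actual switch_to() happens here ... */
	perf_counter_task_sched_in(next, cpu);
}
#endif
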
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
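
/*
 * Illustrative sketch (not part of the original header):
 * hw_perf_save_disable() stops all hardware counters and returns the
 * previous control state; hw_perf_restore() re-enables them. The pair
 * is meant to bracket sections that manipulate counter state:
 */
#if 0
static void example_with_counters_quiesced(void)
{
	u64 ctrl = hw_perf_save_disable();

	/* ... modify counter state with the PMU quiesced ... */

	hw_perf_restore(ctrl);
}
#endif
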
extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
extern u64 atomic64_counter_read(struct perf_counter *counter);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */