/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;

static int nr_counters_fixed __read_mostly;

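/*
 * Per-CPU bookkeeping: which counter occupies each PMC slot, plus the
 * interrupt-throttling state used by the overflow handler:
 */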
struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        u64                     global_enable;
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

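/*
 * Each entry encodes (unit-mask << 8) | event-select - e.g. 0x412e is
 * event 0x2e (last-level cache) with unit mask 0x41 (misses):
 */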
static const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]          = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]        = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]    = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]        = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]       = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]          = 0x013c,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * The computed delta is folded into the generic counter's count.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
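        /*
         * Worked example (hypothetical values): prev_raw_count 0xfffffff0,
         * new_raw_count 0x00000010 after a 32-bit wrap - the s32 subtraction
         * yields 0x20, i.e. 32 events, rather than a huge bogus u64 delta:
         */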
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}

/*
 * Set up the hardware configuration for a given hw_event type:
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event->nmi)
                        hwc->nmi = 1;
        }

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);

        /*
         * Raw event types provide the config in the event structure:
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;

        return 0;
}

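/*
 * Disable all counters and return the previous state of the global
 * control MSR, for a matching hw_perf_restore(). A minimal (hypothetical)
 * caller sketch:
 *
 *      u64 ctrl = hw_perf_save_disable();
 *      ...                     // modify counter state with the PMU quiet
 *      hw_perf_restore(ctrl);
 */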
u64 hw_perf_save_disable(void)
{
        u64 ctrl;

        if (unlikely(!perf_counters_initialized))
                return 0;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

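        /*
         * Each fixed counter owns a 4-bit enable field in
         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clearing the field disables it:
         */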
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__pmc_generic_disable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

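/*
 * Per-CPU snapshot of each counter's 'events left until the next IRQ',
 * kept so that perf_counter_print_debug() can report it:
 */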
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s32 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas. Writing -left
         * means the counter overflows (and raises its PMI) after 'left'
         * further increments:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}

static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8) and ring-3 counting (0x2),
         * and enable ring-0 counting if allowed:
         */
        bits = 0x8ULL | 0x2ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void
__pmc_generic_enable(struct perf_counter *counter,
                     struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                wrmsr(hwc->config_base + idx,
                      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

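/*
 * Intel fixed-function counters each count one hardwired event:
 * fixed ctr 0 counts instructions retired, 1 counts core cycles,
 * 2 counts bus (reference) cycles. Return the fixed-counter index
 * if this event can be scheduled on one, -1 otherwise:
 */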
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
                hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
        }

        perf_counters_lapic_init(hwc->nmi);

        __pmc_generic_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        smp_wmb();

        __hw_perf_counter_set_period(counter, hwc, idx);
        __pmc_generic_enable(counter, hwc, idx);

        return 0;
}

void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
        rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
        printk(KERN_INFO "CPU#%d: fixed:      %016llx\n", cpu, fixed);
        printk(KERN_INFO "CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                printk(KERN_INFO "CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}

static void pmc_generic_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __pmc_generic_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        smp_wmb();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        __hw_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __pmc_generic_enable(counter, hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;

        /*
         * Store sibling timestamps (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {

                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
}

/*
 * Maximum interrupt frequency of 100 kHz per CPU - e.g. with HZ=1000
 * this allows at most 100 counter interrupts per tick:
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

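        /*
         * Each set bit in GLOBAL_STATUS marks an overflowed counter;
         * handle them all, then acknowledge via GLOBAL_OVF_CTRL:
         */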
again:
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not re-enable when global enable is off or throttled:
         */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}

void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;
        u64 global_enable;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        }
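        /*
         * Re-arm the PMU if the interrupt handler left it disabled even
         * though we have a non-zero enable mask saved:
         */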
        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
        if (unlikely(cpuc->global_enable && !global_enable))
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
        cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        inc_irq_stat(apic_perf_irqs);
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);

        irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
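        /*
         * LVTERR is masked while the LVTPC entry is rewritten - presumably
         * to suppress spurious error interrupts during the update:
         */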
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int ebx;
        unsigned int unused;
        union cpuid10_edx edx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
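        /*
         * CPUID leaf 0xA (architectural perfmon): eax carries the version,
         * generic counter count and bit width; edx the fixed counter count:
         */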
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
        printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
        nr_counters_generic = eax.split.num_counters;
        if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
                nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                        nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << nr_counters_generic) - 1;
        perf_max_counters = nr_counters_generic;

        printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
        counter_value_mask = (1ULL << eax.split.bit_width) - 1;
        printk(KERN_INFO "... value mask:      %016Lx\n", counter_value_mask);

        printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);

        nr_counters_fixed = edx.split.num_counters_fixed;
        if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
                nr_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                        nr_counters_fixed, X86_PMC_MAX_FIXED);
        }
        printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);

        perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

        printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
        perf_counters_initialized = true;

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
}

static void pmc_generic_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .enable         = pmc_generic_enable,
        .disable        = pmc_generic_disable,
        .read           = pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}