/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed-function counters yet */

#define MAX_HW_COUNTERS 8

struct cpu_hw_counters {
        struct perf_counter *counters[MAX_HW_COUNTERS];
        unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CYCLES]              = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]        = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]    = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]        = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]       = 0x00c5,
};

const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

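/*
 * Each map entry is in EVENTSEL format: bits 7:0 are the architectural
 * event-select code and bits 15:8 the unit mask. For example, 0x412e is
 * event 0x2e with unit mask 0x41 (LLC misses), while 0x4f2e applies unit
 * mask 0x4f to the same event to count all LLC references.
 */
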
/*
 * Set up the hardware configuration for a given hw_event type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event->nmi)
                        hwc->nmi = 1;
        }

        hwc->config_base  = MSR_ARCH_PERFMON_EVENTSEL0;
        hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;

        hwc->irq_period   = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if (!hwc->irq_period)
                hwc->irq_period = 0x7FFFFFFF;

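        /*
         * The counter counts upward and raises its interrupt when it
         * overflows, so programming the initial value to -irq_period
         * makes it overflow after exactly irq_period events:
         */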
        hwc->next_count = -(s32)hwc->irq_period;

        /*
         * Raw event types provide the config directly in the event structure:
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;

        return 0;
}

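/*
 * Worked example for the setup above: for hw_event->type == PERF_COUNT_CYCLES
 * requested by a CAP_SYS_ADMIN task, the resulting event-select value is
 *
 *      0x003c | ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS |
 *               ARCH_PERFMON_EVENTSEL_INT
 *
 * i.e. unhalted core cycles, counted in both user and kernel mode, with the
 * overflow interrupt armed. The enable bit is only set later, in
 * __x86_perf_counter_enable().
 */
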
void hw_perf_enable_all(void)
{
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}

void hw_perf_restore_ctrl(u64 ctrl)
{
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);

u64 hw_perf_disable_all(void)
{
        u64 ctrl;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_disable_all);

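/*
 * A typical use of the pair above (a sketch - the actual callers live in
 * the generic perf_counter code):
 *
 *      u64 ctrl = hw_perf_disable_all();
 *      ... update counter state without racing against the PMU ...
 *      hw_perf_restore_ctrl(ctrl);
 */
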
static inline void
__x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
{
        wrmsr(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);

static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
{
        per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;

        wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
}

static void __x86_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
{
        wrmsr(hwc->config_base + idx,
              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

static void x86_perf_counter_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /* Try to get the previous counter again */
        if (test_and_set_bit(idx, cpuc->used)) {
                idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
                set_bit(idx, cpuc->used);
                hwc->idx = idx;
        }

        perf_counters_lapic_init(hwc->nmi);

        __x86_perf_counter_disable(hwc, idx);

        cpuc->counters[idx] = counter;

        __hw_perf_counter_set_period(hwc, idx);
        __x86_perf_counter_enable(hwc, idx);
}

static void __hw_perf_save_counter(struct perf_counter *counter,
                                   struct hw_perf_counter *hwc, int idx)
{
        s64 raw = -1;
        s64 delta;

        /*
         * Get the raw hw counter value:
         */
        rdmsrl(hwc->counter_base + idx, raw);

        /*
         * Rebase it to zero (it started counting at -irq_period),
         * to see the delta since ->prev_count:
         */
        delta = (s64)hwc->irq_period + (s64)(s32)raw;

        atomic64_counter_set(counter, hwc->prev_count + delta);

        /*
         * Adjust the ->prev_count offset - if we went beyond
         * irq_period units, then we got an IRQ and the counter
         * was set back to -irq_period:
         */
        while (delta >= (s64)hwc->irq_period) {
                hwc->prev_count += hwc->irq_period;
                delta -= (s64)hwc->irq_period;
        }

        /*
         * Calculate the next raw counter value we'll write into
         * the counter at the next sched-in time:
         */
        delta -= (s64)hwc->irq_period;

        hwc->next_count = (s32)delta;
}

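/*
 * Worked example for the arithmetic above: with irq_period == 100 and a
 * counter that was last programmed to -100, the raw MSR value after 70
 * events sign-extends to -30. Then:
 *
 *      delta      = 100 + (-30) = 70   (events since the last save)
 *      next_count =  70 - 100   = -30  (the raw value we stopped at)
 *
 * so re-arming the counter with next_count resumes it exactly where the
 * hardware left off.
 */
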
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
        int cpu, idx;

        if (!nr_hw_counters)
                return;

        local_irq_disable();

        cpu = smp_processor_id();

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:   %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);

        for (idx = 0; idx < nr_hw_counters; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                next_count = per_cpu(prev_next_count[idx], cpu);

                printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: PMC%d next:  %016llx\n",
                        cpu, idx, next_count);
        }
        local_irq_enable();
}

static void x86_perf_counter_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __x86_perf_counter_disable(hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        __hw_perf_save_counter(counter, hwc, idx);
}

static void x86_perf_counter_read(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned long addr = hwc->counter_base + hwc->idx;
        s64 offs, val = -1LL;
        s32 val32;

        /*
         * Careful: an NMI can modify the counter offset (->prev_count)
         * underneath us, so retry until we see a consistent snapshot:
         */
        do {
                offs = hwc->prev_count;
                rdmsrl(addr, val);
        } while (offs != hwc->prev_count);

        val32 = (s32) val;
        val = (s64)hwc->irq_period + (s64)val32;
        atomic64_counter_set(counter, hwc->prev_count + val);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * NMI-safe enable method:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;
        u64 pmc_ctrl;

        rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

        __hw_perf_save_counter(counter, hwc, idx);
        __hw_perf_counter_set_period(hwc, idx);

        if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
                __x86_perf_counter_enable(hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;
        int bit;

        /*
         * Store the counter's own event type and value first:
         */
        perf_store_irq_data(sibling, sibling->hw_event.type);
        perf_store_irq_data(sibling, atomic64_counter_read(sibling));

        /*
         * Then store the values of the siblings (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
                if (!counter->active) {
                        /*
                         * If the counter is not in the overflown mask, we
                         * have to read its value from the hardware. We also
                         * read it if it overflowed but has not been
                         * processed yet, and clear its bit in the status
                         * mask.
                         */
                        bit = counter->hw.idx;
                        if (!test_bit(bit, (unsigned long *) overflown) ||
                            test_bit(bit, (unsigned long *) status)) {
                                clear_bit(bit, (unsigned long *) status);
                                perf_save_and_restart(counter);
                        }
                }
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_counter_read(counter));
        }
}

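/*
 * The irqdata stream produced for a PERF_RECORD_GROUP counter is thus a
 * flat sequence of u64 pairs - (hw_event.type, counter value) - first for
 * the counter that overflowed, then for every counter on the group
 * leader's sibling list.
 */
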
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status, saved_global;
        struct cpu_hw_counters *cpuc;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

        /* Disable counters globally */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        ack_APIC_irq();

        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        ack = status;
        for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not reenable when global enable is off:
         */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
#ifdef CONFIG_X86_64
        add_pda(apic_perf_irqs, 1);
#else
        per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++;
#endif
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);

        irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call = perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * the Branch Misses Retired event:
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

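        /*
         * Per the Intel SDM, CPUID leaf 0xA reports in EAX: bits 7:0 the
         * PerfMon version, bits 15:8 the number of general-purpose
         * counters per logical CPU, bits 23:16 their bit width and bits
         * 31:24 the length of the EBX event-availability bit vector -
         * which is why a mask_length too short to cover the Branch
         * Misses Retired bit is rejected above.
         */
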
        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
        printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
        nr_hw_counters = eax.split.num_counters;
        if (nr_hw_counters > MAX_HW_COUNTERS) {
                nr_hw_counters = MAX_HW_COUNTERS;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                        nr_hw_counters, MAX_HW_COUNTERS);
        }
        perf_counter_mask = (1 << nr_hw_counters) - 1;
        perf_max_counters = nr_hw_counters;

        printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
        printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);

        perf_counters_initialized = true;
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .hw_perf_counter_enable  = x86_perf_counter_enable,
        .hw_perf_counter_disable = x86_perf_counter_disable,
        .hw_perf_counter_read    = x86_perf_counter_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}
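
/*
 * The generic perf_counter core presumably holds on to the ops pointer
 * returned above and calls ->hw_perf_counter_enable()/_disable() when a
 * counter is scheduled onto or off a CPU, and ->hw_perf_counter_read()
 * when its current value is needed - this file only supplies the three
 * MSR-level implementations.
 */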