Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Performance counter x86 architecture code |
| 3 | * |
| 4 | * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de> |
| 5 | * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 6 | * Copyright(C) 2009 Jaswinder Singh Rajput |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 7 | * |
| 8 | * For licencing details see kernel-base/COPYING |
| 9 | */ |
| 10 | |
| 11 | #include <linux/perf_counter.h> |
| 12 | #include <linux/capability.h> |
| 13 | #include <linux/notifier.h> |
| 14 | #include <linux/hardirq.h> |
| 15 | #include <linux/kprobes.h> |
Thomas Gleixner | 4ac1329 | 2008-12-09 21:43:39 +0100 | [diff] [blame] | 16 | #include <linux/module.h> |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 17 | #include <linux/kdebug.h> |
| 18 | #include <linux/sched.h> |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 19 | #include <linux/uaccess.h> |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 20 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 21 | #include <asm/apic.h> |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 22 | #include <asm/stacktrace.h> |
Peter Zijlstra | 4e935e4 | 2009-03-30 19:07:16 +0200 | [diff] [blame] | 23 | #include <asm/nmi.h> |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 24 | |
| 25 | static bool perf_counters_initialized __read_mostly; |
| 26 | |
| 27 | /* |
| 28 | * Number of (generic) HW counters: |
| 29 | */ |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 30 | static int nr_counters_generic __read_mostly; |
| 31 | static u64 perf_counter_mask __read_mostly; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 32 | static u64 counter_value_mask __read_mostly; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 33 | static int counter_value_bits __read_mostly; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 34 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 35 | static int nr_counters_fixed __read_mostly; |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 36 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 37 | struct cpu_hw_counters { |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 38 | struct perf_counter *counters[X86_PMC_IDX_MAX]; |
| 39 | unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 40 | unsigned long interrupts; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 41 | u64 throttle_ctrl; |
Peter Zijlstra | 184fe4ab | 2009-03-08 11:34:19 +0100 | [diff] [blame] | 42 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 43 | int enabled; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 44 | }; |
| 45 | |
| 46 | /* |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 47 | * struct pmc_x86_ops - performance counter x86 ops |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 48 | */ |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 49 | struct pmc_x86_ops { |
Jaswinder Singh Rajput | 169e41e | 2009-02-28 18:37:49 +0530 | [diff] [blame] | 50 | u64 (*save_disable_all)(void); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 51 | void (*restore_all)(u64); |
| 52 | u64 (*get_status)(u64); |
| 53 | void (*ack_status)(u64); |
| 54 | void (*enable)(int, u64); |
| 55 | void (*disable)(int, u64); |
Jaswinder Singh Rajput | 169e41e | 2009-02-28 18:37:49 +0530 | [diff] [blame] | 56 | unsigned eventsel; |
| 57 | unsigned perfctr; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 58 | u64 (*event_map)(int); |
| 59 | u64 (*raw_event)(u64); |
Jaswinder Singh Rajput | 169e41e | 2009-02-28 18:37:49 +0530 | [diff] [blame] | 60 | int max_events; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 61 | }; |
| 62 | |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 63 | static struct pmc_x86_ops *pmc_ops __read_mostly; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 64 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 65 | static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { |
| 66 | .enabled = 1, |
| 67 | }; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 68 | |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 69 | static __read_mostly int intel_perfmon_version; |
| 70 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 71 | /* |
| 72 | * Intel PerfMon v3. Used on Core2 and later. |
| 73 | */ |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 74 | static const u64 intel_perfmon_event_map[] = |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 75 | { |
Ingo Molnar | f650a67 | 2008-12-23 12:17:29 +0100 | [diff] [blame] | 76 | [PERF_COUNT_CPU_CYCLES] = 0x003c, |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 77 | [PERF_COUNT_INSTRUCTIONS] = 0x00c0, |
| 78 | [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, |
| 79 | [PERF_COUNT_CACHE_MISSES] = 0x412e, |
| 80 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, |
| 81 | [PERF_COUNT_BRANCH_MISSES] = 0x00c5, |
Ingo Molnar | f650a67 | 2008-12-23 12:17:29 +0100 | [diff] [blame] | 82 | [PERF_COUNT_BUS_CYCLES] = 0x013c, |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 83 | }; |
| 84 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 85 | static u64 pmc_intel_event_map(int event) |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 86 | { |
| 87 | return intel_perfmon_event_map[event]; |
| 88 | } |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 89 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 90 | static u64 pmc_intel_raw_event(u64 event) |
| 91 | { |
Peter Zijlstra | 82bae4f8 | 2009-03-13 12:21:31 +0100 | [diff] [blame] | 92 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL |
| 93 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL |
| 94 | #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 95 | |
| 96 | #define CORE_EVNTSEL_MASK \ |
| 97 | (CORE_EVNTSEL_EVENT_MASK | \ |
| 98 | CORE_EVNTSEL_UNIT_MASK | \ |
| 99 | CORE_EVNTSEL_COUNTER_MASK) |
| 100 | |
| 101 | return event & CORE_EVNTSEL_MASK; |
| 102 | } |
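/*
 * The mask above keeps only the architectural EVNTSEL fields a raw
 * event may set: event select (bits 0-7), unit mask (bits 8-15) and
 * the counter-mask/cmask field (bits 24-31).  Control bits such as
 * USR, OS, INT and EN are filtered out, since __hw_perf_counter_init()
 * manages those itself.
 */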
| 103 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 104 | /* |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 105 | * AMD Performance Monitor K7 and later. |
| 106 | */ |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 107 | static const u64 amd_perfmon_event_map[] = |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 108 | { |
| 109 | [PERF_COUNT_CPU_CYCLES] = 0x0076, |
| 110 | [PERF_COUNT_INSTRUCTIONS] = 0x00c0, |
| 111 | [PERF_COUNT_CACHE_REFERENCES] = 0x0080, |
| 112 | [PERF_COUNT_CACHE_MISSES] = 0x0081, |
| 113 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, |
| 114 | [PERF_COUNT_BRANCH_MISSES] = 0x00c5, |
| 115 | }; |
| 116 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 117 | static u64 pmc_amd_event_map(int event) |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 118 | { |
| 119 | return amd_perfmon_event_map[event]; |
| 120 | } |
| 121 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 122 | static u64 pmc_amd_raw_event(u64 event) |
| 123 | { |
Peter Zijlstra | 82bae4f8 | 2009-03-13 12:21:31 +0100 | [diff] [blame] | 124 | #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL |
| 125 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL |
| 126 | #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 127 | |
| 128 | #define K7_EVNTSEL_MASK \ |
| 129 | (K7_EVNTSEL_EVENT_MASK | \ |
| 130 | K7_EVNTSEL_UNIT_MASK | \ |
| 131 | K7_EVNTSEL_COUNTER_MASK) |
| 132 | |
| 133 | return event & K7_EVNTSEL_MASK; |
| 134 | } |
| 135 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 136 | /* |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 137 | * Propagate counter elapsed time into the generic counter. |
| 138 | * Can only be executed on the CPU where the counter is active. |
 | 139 | * Updates counter->count and hwc->period_left in place. |
| 140 | */ |
| 141 | static void |
| 142 | x86_perf_counter_update(struct perf_counter *counter, |
| 143 | struct hw_perf_counter *hwc, int idx) |
| 144 | { |
| 145 | u64 prev_raw_count, new_raw_count, delta; |
| 146 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 147 | /* |
| 148 | * Careful: an NMI might modify the previous counter value. |
| 149 | * |
| 150 | * Our tactic to handle this is to first atomically read and |
| 151 | * exchange a new raw count - then add that new-prev delta |
| 152 | * count to the generic counter atomically: |
| 153 | */ |
| 154 | again: |
| 155 | prev_raw_count = atomic64_read(&hwc->prev_count); |
| 156 | rdmsrl(hwc->counter_base + idx, new_raw_count); |
| 157 | |
| 158 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, |
| 159 | new_raw_count) != prev_raw_count) |
| 160 | goto again; |
| 161 | |
| 162 | /* |
| 163 | * Now we have the new raw value and have updated the prev |
| 164 | * timestamp already. We can now calculate the elapsed delta |
| 165 | * (counter-)time and add that to the generic counter. |
| 166 | * |
 | 167 | * Careful: not all hw sign-extends above the physical width |
 | 168 | * of the count, so the delta computation is clipped to 32 bits: |
| 169 | */ |
| 170 | delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count); |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 171 | |
| 172 | atomic64_add(delta, &counter->count); |
| 173 | atomic64_sub(delta, &hwc->period_left); |
| 174 | } |
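/*
 * Worked example of the 32-bit delta above: with prev_raw_count ==
 * 0xffffffff and new_raw_count == 0x00000003 the subtraction is done
 * as (s32)3 - (s32)-1 = 4, so a counter that wrapped its low 32 bits
 * still contributes the correct number of elapsed events.
 */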
| 175 | |
Peter Zijlstra | 4e935e4 | 2009-03-30 19:07:16 +0200 | [diff] [blame] | 176 | static atomic_t num_counters; |
| 177 | static DEFINE_MUTEX(pmc_reserve_mutex); |
| 178 | |
| 179 | static bool reserve_pmc_hardware(void) |
| 180 | { |
| 181 | int i; |
| 182 | |
| 183 | if (nmi_watchdog == NMI_LOCAL_APIC) |
| 184 | disable_lapic_nmi_watchdog(); |
| 185 | |
| 186 | for (i = 0; i < nr_counters_generic; i++) { |
| 187 | if (!reserve_perfctr_nmi(pmc_ops->perfctr + i)) |
| 188 | goto perfctr_fail; |
| 189 | } |
| 190 | |
| 191 | for (i = 0; i < nr_counters_generic; i++) { |
| 192 | if (!reserve_evntsel_nmi(pmc_ops->eventsel + i)) |
| 193 | goto eventsel_fail; |
| 194 | } |
| 195 | |
| 196 | return true; |
| 197 | |
| 198 | eventsel_fail: |
| 199 | for (i--; i >= 0; i--) |
| 200 | release_evntsel_nmi(pmc_ops->eventsel + i); |
| 201 | |
| 202 | i = nr_counters_generic; |
| 203 | |
| 204 | perfctr_fail: |
| 205 | for (i--; i >= 0; i--) |
| 206 | release_perfctr_nmi(pmc_ops->perfctr + i); |
| 207 | |
| 208 | if (nmi_watchdog == NMI_LOCAL_APIC) |
| 209 | enable_lapic_nmi_watchdog(); |
| 210 | |
| 211 | return false; |
| 212 | } |
| 213 | |
| 214 | static void release_pmc_hardware(void) |
| 215 | { |
| 216 | int i; |
| 217 | |
| 218 | for (i = 0; i < nr_counters_generic; i++) { |
| 219 | release_perfctr_nmi(pmc_ops->perfctr + i); |
| 220 | release_evntsel_nmi(pmc_ops->eventsel + i); |
| 221 | } |
| 222 | |
| 223 | if (nmi_watchdog == NMI_LOCAL_APIC) |
| 224 | enable_lapic_nmi_watchdog(); |
| 225 | } |
| 226 | |
| 227 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
| 228 | { |
| 229 | if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) { |
| 230 | release_pmc_hardware(); |
| 231 | mutex_unlock(&pmc_reserve_mutex); |
| 232 | } |
| 233 | } |
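/*
 * num_counters acts as a reference count on the PMC hardware: the
 * first counter to be initialized reserves the perfctr/eventsel MSRs
 * (and parks the lapic NMI watchdog) via reserve_pmc_hardware(), and
 * the last counter to be destroyed hands them back here through
 * release_pmc_hardware().
 */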
| 234 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 235 | /* |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 236 | * Set up the hardware configuration for a given hw_event |
| 237 | */ |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 238 | static int __hw_perf_counter_init(struct perf_counter *counter) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 239 | { |
Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 240 | struct perf_counter_hw_event *hw_event = &counter->hw_event; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 241 | struct hw_perf_counter *hwc = &counter->hw; |
Peter Zijlstra | 4e935e4 | 2009-03-30 19:07:16 +0200 | [diff] [blame] | 242 | int err; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 243 | |
| 244 | if (unlikely(!perf_counters_initialized)) |
| 245 | return -EINVAL; |
| 246 | |
Peter Zijlstra | 4e935e4 | 2009-03-30 19:07:16 +0200 | [diff] [blame] | 247 | err = 0; |
 | 248 | if (!atomic_inc_not_zero(&num_counters)) { |
| 249 | mutex_lock(&pmc_reserve_mutex); |
| 250 | if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware()) |
| 251 | err = -EBUSY; |
| 252 | else |
| 253 | atomic_inc(&num_counters); |
| 254 | mutex_unlock(&pmc_reserve_mutex); |
| 255 | } |
| 256 | if (err) |
| 257 | return err; |
| 258 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 259 | /* |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 260 | * Generate PMC IRQs: |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 261 | * (keep 'enabled' bit clear for now) |
| 262 | */ |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 263 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 264 | |
| 265 | /* |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 266 | * Count user and OS events unless requested not to. |
| 267 | */ |
| 268 | if (!hw_event->exclude_user) |
| 269 | hwc->config |= ARCH_PERFMON_EVENTSEL_USR; |
| 270 | if (!hw_event->exclude_kernel) |
| 271 | hwc->config |= ARCH_PERFMON_EVENTSEL_OS; |
| 272 | |
| 273 | /* |
| 274 | * If privileged enough, allow NMI events: |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 275 | */ |
| 276 | hwc->nmi = 0; |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 277 | if (capable(CAP_SYS_ADMIN) && hw_event->nmi) |
| 278 | hwc->nmi = 1; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 279 | |
Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 280 | hwc->irq_period = hw_event->irq_period; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 281 | /* |
| 282 | * Intel PMCs cannot be accessed sanely above 32 bit width, |
 | 283 | * so we install an artificial maximum period of 2^31-1 regardless of |
| 284 | * the generic counter period: |
| 285 | */ |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 286 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
| 287 | if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) |
| 288 | hwc->irq_period = 0x7FFFFFFF; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 289 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 290 | atomic64_set(&hwc->period_left, hwc->irq_period); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 291 | |
| 292 | /* |
Thomas Gleixner | dfa7c89 | 2008-12-08 19:35:37 +0100 | [diff] [blame] | 293 | * Raw events provide the config directly in the event structure |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 294 | */ |
Peter Zijlstra | f4a2deb4 | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 295 | if (perf_event_raw(hw_event)) { |
| 296 | hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event)); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 297 | } else { |
Peter Zijlstra | f4a2deb4 | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 298 | if (perf_event_id(hw_event) >= pmc_ops->max_events) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 299 | return -EINVAL; |
| 300 | /* |
| 301 | * The generic map: |
| 302 | */ |
Peter Zijlstra | f4a2deb4 | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 303 | hwc->config |= pmc_ops->event_map(perf_event_id(hw_event)); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 304 | } |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 305 | |
Peter Zijlstra | 4e935e4 | 2009-03-30 19:07:16 +0200 | [diff] [blame] | 306 | counter->destroy = hw_perf_counter_destroy; |
| 307 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 308 | return 0; |
| 309 | } |
| 310 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 311 | static u64 pmc_intel_save_disable_all(void) |
Thomas Gleixner | 4ac1329 | 2008-12-09 21:43:39 +0100 | [diff] [blame] | 312 | { |
| 313 | u64 ctrl; |
| 314 | |
| 315 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 316 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
Ingo Molnar | 2b9ff0d | 2008-12-14 18:36:30 +0100 | [diff] [blame] | 317 | |
Thomas Gleixner | 4ac1329 | 2008-12-09 21:43:39 +0100 | [diff] [blame] | 318 | return ctrl; |
| 319 | } |
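/*
 * On Intel a single MSR (MSR_CORE_PERF_GLOBAL_CTRL) gates all generic
 * and fixed counters, so disabling is just "save the old enable mask,
 * write 0"; pmc_intel_restore_all() below writes the saved mask back.
 */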
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 320 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 321 | static u64 pmc_amd_save_disable_all(void) |
| 322 | { |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 323 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
| 324 | int enabled, idx; |
| 325 | |
| 326 | enabled = cpuc->enabled; |
| 327 | cpuc->enabled = 0; |
Peter Zijlstra | 60b3df9 | 2009-03-13 12:21:30 +0100 | [diff] [blame] | 328 | /* |
| 329 | * ensure we write the disable before we start disabling the |
 | 330 | * counters proper, so that pmc_amd_enable() does the right thing. |
| 331 | */ |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 332 | barrier(); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 333 | |
| 334 | for (idx = 0; idx < nr_counters_generic; idx++) { |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 335 | u64 val; |
| 336 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 337 | rdmsrl(MSR_K7_EVNTSEL0 + idx, val); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 338 | if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) { |
| 339 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; |
| 340 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); |
| 341 | } |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 342 | } |
| 343 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 344 | return enabled; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 345 | } |
| 346 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 347 | u64 hw_perf_save_disable(void) |
| 348 | { |
| 349 | if (unlikely(!perf_counters_initialized)) |
| 350 | return 0; |
| 351 | |
| 352 | return pmc_ops->save_disable_all(); |
| 353 | } |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 354 | /* |
| 355 | * Exported because of ACPI idle |
| 356 | */ |
Ingo Molnar | 01b2838 | 2008-12-11 13:45:51 +0100 | [diff] [blame] | 357 | EXPORT_SYMBOL_GPL(hw_perf_save_disable); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 358 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 359 | static void pmc_intel_restore_all(u64 ctrl) |
| 360 | { |
| 361 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
| 362 | } |
| 363 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 364 | static void pmc_amd_restore_all(u64 ctrl) |
| 365 | { |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 366 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 367 | int idx; |
| 368 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 369 | cpuc->enabled = ctrl; |
| 370 | barrier(); |
| 371 | if (!ctrl) |
| 372 | return; |
| 373 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 374 | for (idx = 0; idx < nr_counters_generic; idx++) { |
Peter Zijlstra | 184fe4ab | 2009-03-08 11:34:19 +0100 | [diff] [blame] | 375 | if (test_bit(idx, cpuc->active_mask)) { |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 376 | u64 val; |
| 377 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 378 | rdmsrl(MSR_K7_EVNTSEL0 + idx, val); |
| 379 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
| 380 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); |
| 381 | } |
| 382 | } |
| 383 | } |
| 384 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 385 | void hw_perf_restore(u64 ctrl) |
| 386 | { |
Ingo Molnar | 2b9ff0d | 2008-12-14 18:36:30 +0100 | [diff] [blame] | 387 | if (unlikely(!perf_counters_initialized)) |
| 388 | return; |
| 389 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 390 | pmc_ops->restore_all(ctrl); |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 391 | } |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 392 | /* |
| 393 | * Exported because of ACPI idle |
| 394 | */ |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 395 | EXPORT_SYMBOL_GPL(hw_perf_restore); |
| 396 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 397 | static u64 pmc_intel_get_status(u64 mask) |
| 398 | { |
| 399 | u64 status; |
| 400 | |
| 401 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); |
| 402 | |
| 403 | return status; |
| 404 | } |
| 405 | |
| 406 | static u64 pmc_amd_get_status(u64 mask) |
| 407 | { |
| 408 | u64 status = 0; |
| 409 | int idx; |
| 410 | |
| 411 | for (idx = 0; idx < nr_counters_generic; idx++) { |
| 412 | s64 val; |
| 413 | |
| 414 | if (!(mask & (1 << idx))) |
| 415 | continue; |
| 416 | |
| 417 | rdmsrl(MSR_K7_PERFCTR0 + idx, val); |
| 418 | val <<= (64 - counter_value_bits); |
| 419 | if (val >= 0) |
| 420 | status |= (1 << idx); |
| 421 | } |
| 422 | |
| 423 | return status; |
| 424 | } |
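/*
 * AMD has no global overflow status MSR, so the status bits are
 * reconstructed by hand: a live counter is normally armed with a value
 * whose top implemented bit is set (see __hw_perf_counter_set_period()),
 * and the shift above moves that bit into the sign bit.  A non-negative
 * result therefore means the counter wrapped, i.e. an overflow is
 * pending for that index.
 */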
| 425 | |
| 426 | static u64 hw_perf_get_status(u64 mask) |
| 427 | { |
| 428 | if (unlikely(!perf_counters_initialized)) |
| 429 | return 0; |
| 430 | |
| 431 | return pmc_ops->get_status(mask); |
| 432 | } |
| 433 | |
| 434 | static void pmc_intel_ack_status(u64 ack) |
| 435 | { |
| 436 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); |
| 437 | } |
| 438 | |
| 439 | static void pmc_amd_ack_status(u64 ack) |
| 440 | { |
| 441 | } |
| 442 | |
| 443 | static void hw_perf_ack_status(u64 ack) |
| 444 | { |
| 445 | if (unlikely(!perf_counters_initialized)) |
| 446 | return; |
| 447 | |
| 448 | pmc_ops->ack_status(ack); |
| 449 | } |
| 450 | |
| 451 | static void pmc_intel_enable(int idx, u64 config) |
| 452 | { |
| 453 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, |
| 454 | config | ARCH_PERFMON_EVENTSEL0_ENABLE); |
| 455 | } |
| 456 | |
| 457 | static void pmc_amd_enable(int idx, u64 config) |
| 458 | { |
| 459 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
| 460 | |
Peter Zijlstra | 184fe4ab | 2009-03-08 11:34:19 +0100 | [diff] [blame] | 461 | set_bit(idx, cpuc->active_mask); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 462 | if (cpuc->enabled) |
| 463 | config |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
| 464 | |
| 465 | wrmsrl(MSR_K7_EVNTSEL0 + idx, config); |
| 466 | } |
| 467 | |
| 468 | static void hw_perf_enable(int idx, u64 config) |
| 469 | { |
| 470 | if (unlikely(!perf_counters_initialized)) |
| 471 | return; |
| 472 | |
| 473 | pmc_ops->enable(idx, config); |
| 474 | } |
| 475 | |
| 476 | static void pmc_intel_disable(int idx, u64 config) |
| 477 | { |
| 478 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config); |
| 479 | } |
| 480 | |
| 481 | static void pmc_amd_disable(int idx, u64 config) |
| 482 | { |
| 483 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
| 484 | |
Peter Zijlstra | 184fe4ab | 2009-03-08 11:34:19 +0100 | [diff] [blame] | 485 | clear_bit(idx, cpuc->active_mask); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 486 | wrmsrl(MSR_K7_EVNTSEL0 + idx, config); |
| 487 | |
| 488 | } |
| 489 | |
| 490 | static void hw_perf_disable(int idx, u64 config) |
| 491 | { |
| 492 | if (unlikely(!perf_counters_initialized)) |
| 493 | return; |
| 494 | |
| 495 | pmc_ops->disable(idx, config); |
| 496 | } |
| 497 | |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 498 | static inline void |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 499 | __pmc_fixed_disable(struct perf_counter *counter, |
| 500 | struct hw_perf_counter *hwc, unsigned int __idx) |
| 501 | { |
| 502 | int idx = __idx - X86_PMC_IDX_FIXED; |
| 503 | u64 ctrl_val, mask; |
| 504 | int err; |
| 505 | |
| 506 | mask = 0xfULL << (idx * 4); |
| 507 | |
| 508 | rdmsrl(hwc->config_base, ctrl_val); |
| 509 | ctrl_val &= ~mask; |
| 510 | err = checking_wrmsrl(hwc->config_base, ctrl_val); |
| 511 | } |
| 512 | |
| 513 | static inline void |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 514 | __pmc_generic_disable(struct perf_counter *counter, |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 515 | struct hw_perf_counter *hwc, unsigned int idx) |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 516 | { |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 517 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) |
Jaswinder Singh Rajput | 2b583d8 | 2008-12-27 19:15:43 +0530 | [diff] [blame] | 518 | __pmc_fixed_disable(counter, hwc, idx); |
| 519 | else |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 520 | hw_perf_disable(idx, hwc->config); |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 521 | } |
| 522 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 523 | static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 524 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 525 | /* |
| 526 | * Set the next IRQ period, based on the hwc->period_left value. |
| 527 | * To be called with the counter disabled in hw: |
| 528 | */ |
| 529 | static void |
| 530 | __hw_perf_counter_set_period(struct perf_counter *counter, |
| 531 | struct hw_perf_counter *hwc, int idx) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 532 | { |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 533 | s64 left = atomic64_read(&hwc->period_left); |
Peter Zijlstra | 595258a | 2009-03-13 12:21:28 +0100 | [diff] [blame] | 534 | s64 period = hwc->irq_period; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 535 | int err; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 536 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 537 | /* |
 | 538 | * If we are way outside a reasonable range then just skip forward: |
| 539 | */ |
| 540 | if (unlikely(left <= -period)) { |
| 541 | left = period; |
| 542 | atomic64_set(&hwc->period_left, left); |
| 543 | } |
| 544 | |
| 545 | if (unlikely(left <= 0)) { |
| 546 | left += period; |
| 547 | atomic64_set(&hwc->period_left, left); |
| 548 | } |
| 549 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 550 | per_cpu(prev_left[idx], smp_processor_id()) = left; |
| 551 | |
| 552 | /* |
| 553 | * The hw counter starts counting from this counter offset, |
 | 554 | * mark it to be able to extract future deltas: |
| 555 | */ |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 556 | atomic64_set(&hwc->prev_count, (u64)-left); |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 557 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 558 | err = checking_wrmsrl(hwc->counter_base + idx, |
| 559 | (u64)(-left) & counter_value_mask); |
| 560 | } |
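/*
 * Example: with 10000 events left in the current period the counter
 * register is loaded with (-10000) & counter_value_mask, so the
 * hardware counts up for exactly 10000 events before overflowing and
 * raising the interrupt handled by __smp_perf_counter_interrupt().
 */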
| 561 | |
| 562 | static inline void |
| 563 | __pmc_fixed_enable(struct perf_counter *counter, |
| 564 | struct hw_perf_counter *hwc, unsigned int __idx) |
| 565 | { |
| 566 | int idx = __idx - X86_PMC_IDX_FIXED; |
| 567 | u64 ctrl_val, bits, mask; |
| 568 | int err; |
| 569 | |
| 570 | /* |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 571 | * Enable IRQ generation (0x8), |
| 572 | * and enable ring-3 counting (0x2) and ring-0 counting (0x1) |
| 573 | * if requested: |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 574 | */ |
Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 575 | bits = 0x8ULL; |
| 576 | if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) |
| 577 | bits |= 0x2; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 578 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) |
| 579 | bits |= 0x1; |
| 580 | bits <<= (idx * 4); |
| 581 | mask = 0xfULL << (idx * 4); |
| 582 | |
| 583 | rdmsrl(hwc->config_base, ctrl_val); |
| 584 | ctrl_val &= ~mask; |
| 585 | ctrl_val |= bits; |
| 586 | err = checking_wrmsrl(hwc->config_base, ctrl_val); |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 587 | } |
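/*
 * Fixed counters share one control MSR (MSR_ARCH_PERFMON_FIXED_CTR_CTRL)
 * with a 4-bit field per counter: bit 0 enables ring-0 counting, bit 1
 * ring-3 counting and bit 3 PMI on overflow, exactly the 0x1/0x2/0x8
 * bits assembled above before being shifted into place.
 */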
| 588 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 589 | static void |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 590 | __pmc_generic_enable(struct perf_counter *counter, |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 591 | struct hw_perf_counter *hwc, int idx) |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 592 | { |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 593 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) |
Jaswinder Singh Rajput | 2b583d8 | 2008-12-27 19:15:43 +0530 | [diff] [blame] | 594 | __pmc_fixed_enable(counter, hwc, idx); |
| 595 | else |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 596 | hw_perf_enable(idx, hwc->config); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 597 | } |
| 598 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 599 | static int |
| 600 | fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 601 | { |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 602 | unsigned int event; |
| 603 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 604 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
| 605 | return -1; |
| 606 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 607 | if (unlikely(hwc->nmi)) |
| 608 | return -1; |
| 609 | |
| 610 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; |
| 611 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 612 | if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS))) |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 613 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 614 | if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES))) |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 615 | return X86_PMC_IDX_FIXED_CPU_CYCLES; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 616 | if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES))) |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 617 | return X86_PMC_IDX_FIXED_BUS_CYCLES; |
| 618 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 619 | return -1; |
| 620 | } |
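/*
 * Only the three architectural events (instructions, cpu-cycles and
 * bus-cycles) can live on Intel's fixed-function counters; AMD CPUs and
 * NMI-driven counters always fall back to the generic PMCs.
 */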
| 621 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 622 | /* |
| 623 | * Find a PMC slot for the freshly enabled / scheduled in counter: |
| 624 | */ |
Ingo Molnar | 95cdd2e | 2008-12-21 13:50:42 +0100 | [diff] [blame] | 625 | static int pmc_generic_enable(struct perf_counter *counter) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 626 | { |
| 627 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
| 628 | struct hw_perf_counter *hwc = &counter->hw; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 629 | int idx; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 630 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 631 | idx = fixed_mode_idx(counter, hwc); |
| 632 | if (idx >= 0) { |
| 633 | /* |
| 634 | * Try to get the fixed counter, if that is already taken |
| 635 | * then try to get a generic counter: |
| 636 | */ |
| 637 | if (test_and_set_bit(idx, cpuc->used)) |
| 638 | goto try_generic; |
Ingo Molnar | 0dff86a | 2008-12-23 12:28:12 +0100 | [diff] [blame] | 639 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 640 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; |
| 641 | /* |
| 642 | * We set it so that counter_base + idx in wrmsr/rdmsr maps to |
| 643 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: |
| 644 | */ |
| 645 | hwc->counter_base = |
| 646 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 647 | hwc->idx = idx; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 648 | } else { |
| 649 | idx = hwc->idx; |
| 650 | /* Try to get the previous generic counter again */ |
| 651 | if (test_and_set_bit(idx, cpuc->used)) { |
| 652 | try_generic: |
| 653 | idx = find_first_zero_bit(cpuc->used, nr_counters_generic); |
| 654 | if (idx == nr_counters_generic) |
| 655 | return -EAGAIN; |
| 656 | |
| 657 | set_bit(idx, cpuc->used); |
| 658 | hwc->idx = idx; |
| 659 | } |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 660 | hwc->config_base = pmc_ops->eventsel; |
| 661 | hwc->counter_base = pmc_ops->perfctr; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 662 | } |
| 663 | |
| 664 | perf_counters_lapic_init(hwc->nmi); |
| 665 | |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 666 | __pmc_generic_disable(counter, hwc, idx); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 667 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 668 | cpuc->counters[idx] = counter; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 669 | /* |
| 670 | * Make it visible before enabling the hw: |
| 671 | */ |
| 672 | smp_wmb(); |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 673 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 674 | __hw_perf_counter_set_period(counter, hwc, idx); |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 675 | __pmc_generic_enable(counter, hwc, idx); |
Ingo Molnar | 95cdd2e | 2008-12-21 13:50:42 +0100 | [diff] [blame] | 676 | |
| 677 | return 0; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 678 | } |
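/*
 * Scheduling order above: try the matching fixed counter first, then
 * the generic slot this counter used last time, then the first free
 * generic slot; -EAGAIN tells the core that no PMC is currently
 * available.
 */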
| 679 | |
| 680 | void perf_counter_print_debug(void) |
| 681 | { |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 682 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; |
Ingo Molnar | 0dff86a | 2008-12-23 12:28:12 +0100 | [diff] [blame] | 683 | struct cpu_hw_counters *cpuc; |
Ingo Molnar | 1e12567 | 2008-12-09 12:18:18 +0100 | [diff] [blame] | 684 | int cpu, idx; |
| 685 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 686 | if (!nr_counters_generic) |
Ingo Molnar | 1e12567 | 2008-12-09 12:18:18 +0100 | [diff] [blame] | 687 | return; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 688 | |
| 689 | local_irq_disable(); |
| 690 | |
| 691 | cpu = smp_processor_id(); |
Ingo Molnar | 0dff86a | 2008-12-23 12:28:12 +0100 | [diff] [blame] | 692 | cpuc = &per_cpu(cpu_hw_counters, cpu); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 693 | |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 694 | if (intel_perfmon_version >= 2) { |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 695 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
| 696 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); |
| 697 | rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); |
| 698 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 699 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 700 | pr_info("\n"); |
| 701 | pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); |
| 702 | pr_info("CPU#%d: status: %016llx\n", cpu, status); |
| 703 | pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); |
| 704 | pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 705 | } |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 706 | pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 707 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 708 | for (idx = 0; idx < nr_counters_generic; idx++) { |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 709 | rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl); |
| 710 | rdmsrl(pmc_ops->perfctr + idx, pmc_count); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 711 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 712 | prev_left = per_cpu(prev_left[idx], cpu); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 713 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 714 | pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 715 | cpu, idx, pmc_ctrl); |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 716 | pr_info("CPU#%d: gen-PMC%d count: %016llx\n", |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 717 | cpu, idx, pmc_count); |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 718 | pr_info("CPU#%d: gen-PMC%d left: %016llx\n", |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 719 | cpu, idx, prev_left); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 720 | } |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 721 | for (idx = 0; idx < nr_counters_fixed; idx++) { |
| 722 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); |
| 723 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 724 | pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 725 | cpu, idx, pmc_count); |
| 726 | } |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 727 | local_irq_enable(); |
| 728 | } |
| 729 | |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 730 | static void pmc_generic_disable(struct perf_counter *counter) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 731 | { |
| 732 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
| 733 | struct hw_perf_counter *hwc = &counter->hw; |
| 734 | unsigned int idx = hwc->idx; |
| 735 | |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 736 | __pmc_generic_disable(counter, hwc, idx); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 737 | |
| 738 | clear_bit(idx, cpuc->used); |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 739 | cpuc->counters[idx] = NULL; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 740 | /* |
| 741 | * Make sure the cleared pointer becomes visible before we |
| 742 | * (potentially) free the counter: |
| 743 | */ |
| 744 | smp_wmb(); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 745 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 746 | /* |
| 747 | * Drain the remaining delta count out of a counter |
| 748 | * that we are disabling: |
| 749 | */ |
| 750 | x86_perf_counter_update(counter, hwc, idx); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 751 | } |
| 752 | |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 753 | /* |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 754 | * Save and restart an expired counter. Called by NMI contexts, |
| 755 | * so it has to be careful about preempting normal counter ops: |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 756 | */ |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 757 | static void perf_save_and_restart(struct perf_counter *counter) |
| 758 | { |
| 759 | struct hw_perf_counter *hwc = &counter->hw; |
| 760 | int idx = hwc->idx; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 761 | |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 762 | x86_perf_counter_update(counter, hwc, idx); |
| 763 | __hw_perf_counter_set_period(counter, hwc, idx); |
Ingo Molnar | 7e2ae34 | 2008-12-09 11:40:46 +0100 | [diff] [blame] | 764 | |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 765 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 766 | __pmc_generic_enable(counter, hwc, idx); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 767 | } |
| 768 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 769 | /* |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 770 | * Maximum interrupt frequency of 100KHz per CPU |
| 771 | */ |
Jaswinder Singh Rajput | 169e41e | 2009-02-28 18:37:49 +0530 | [diff] [blame] | 772 | #define PERFMON_MAX_INTERRUPTS (100000/HZ) |
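/*
 * With HZ=1000 this works out to 100 counter interrupts per unthrottle
 * interval (presumably one timer tick, which gives the 100KHz ceiling
 * noted above).  Once the limit is reached __smp_perf_counter_interrupt()
 * stops calling hw_perf_restore(), leaving the PMU disabled until
 * perf_counter_unthrottle() re-enables it and resets cpuc->interrupts.
 */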
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 773 | |
| 774 | /* |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 775 | * This handler is triggered by the local APIC, so the APIC IRQ handling |
| 776 | * rules apply: |
| 777 | */ |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 778 | static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 779 | { |
| 780 | int bit, cpu = smp_processor_id(); |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 781 | u64 ack, status; |
Mike Galbraith | 1b023a9 | 2009-01-23 10:13:01 +0100 | [diff] [blame] | 782 | struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 783 | int ret = 0; |
Ingo Molnar | 43874d2 | 2008-12-09 12:23:59 +0100 | [diff] [blame] | 784 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 785 | cpuc->throttle_ctrl = hw_perf_save_disable(); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 786 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 787 | status = hw_perf_get_status(cpuc->throttle_ctrl); |
Ingo Molnar | 87b9cf4 | 2008-12-08 14:20:16 +0100 | [diff] [blame] | 788 | if (!status) |
| 789 | goto out; |
| 790 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 791 | ret = 1; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 792 | again: |
Mike Galbraith | d278c48 | 2009-02-09 07:38:50 +0100 | [diff] [blame] | 793 | inc_irq_stat(apic_perf_irqs); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 794 | ack = status; |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 795 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 796 | struct perf_counter *counter = cpuc->counters[bit]; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 797 | |
| 798 | clear_bit(bit, (unsigned long *) &status); |
| 799 | if (!counter) |
| 800 | continue; |
| 801 | |
| 802 | perf_save_and_restart(counter); |
Peter Zijlstra | 0322cd6 | 2009-03-19 20:26:19 +0100 | [diff] [blame] | 803 | perf_counter_output(counter, nmi, regs); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 804 | } |
| 805 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 806 | hw_perf_ack_status(ack); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 807 | |
| 808 | /* |
| 809 | * Repeat if there is more work to be done: |
| 810 | */ |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 811 | status = hw_perf_get_status(cpuc->throttle_ctrl); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 812 | if (status) |
| 813 | goto again; |
Ingo Molnar | 87b9cf4 | 2008-12-08 14:20:16 +0100 | [diff] [blame] | 814 | out: |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 815 | /* |
Mike Galbraith | 1b023a9 | 2009-01-23 10:13:01 +0100 | [diff] [blame] | 816 | * Restore - do not reenable when global enable is off or throttled: |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 817 | */ |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 818 | if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 819 | hw_perf_restore(cpuc->throttle_ctrl); |
| 820 | |
| 821 | return ret; |
Mike Galbraith | 1b023a9 | 2009-01-23 10:13:01 +0100 | [diff] [blame] | 822 | } |
| 823 | |
| 824 | void perf_counter_unthrottle(void) |
| 825 | { |
| 826 | struct cpu_hw_counters *cpuc; |
| 827 | |
| 828 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
| 829 | return; |
| 830 | |
| 831 | if (unlikely(!perf_counters_initialized)) |
| 832 | return; |
| 833 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 834 | cpuc = &__get_cpu_var(cpu_hw_counters); |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 835 | if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) { |
Mike Galbraith | 1b023a9 | 2009-01-23 10:13:01 +0100 | [diff] [blame] | 836 | if (printk_ratelimit()) |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 837 | printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n"); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 838 | hw_perf_restore(cpuc->throttle_ctrl); |
Mike Galbraith | 1b023a9 | 2009-01-23 10:13:01 +0100 | [diff] [blame] | 839 | } |
Mike Galbraith | 4b39fd9 | 2009-01-23 14:36:16 +0100 | [diff] [blame] | 840 | cpuc->interrupts = 0; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 841 | } |
| 842 | |
| 843 | void smp_perf_counter_interrupt(struct pt_regs *regs) |
| 844 | { |
| 845 | irq_enter(); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 846 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 847 | ack_APIC_irq(); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 848 | __smp_perf_counter_interrupt(regs, 0); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 849 | irq_exit(); |
| 850 | } |
| 851 | |
Mike Galbraith | 3415dd9 | 2009-01-23 14:16:53 +0100 | [diff] [blame] | 852 | void perf_counters_lapic_init(int nmi) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 853 | { |
| 854 | u32 apic_val; |
| 855 | |
| 856 | if (!perf_counters_initialized) |
| 857 | return; |
| 858 | /* |
| 859 | * Enable the performance counter vector in the APIC LVT: |
| 860 | */ |
| 861 | apic_val = apic_read(APIC_LVTERR); |
| 862 | |
| 863 | apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED); |
| 864 | if (nmi) |
| 865 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 866 | else |
| 867 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); |
| 868 | apic_write(APIC_LVTERR, apic_val); |
| 869 | } |
| 870 | |
| 871 | static int __kprobes |
| 872 | perf_counter_nmi_handler(struct notifier_block *self, |
| 873 | unsigned long cmd, void *__args) |
| 874 | { |
| 875 | struct die_args *args = __args; |
| 876 | struct pt_regs *regs; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 877 | int ret; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 878 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 879 | switch (cmd) { |
| 880 | case DIE_NMI: |
| 881 | case DIE_NMI_IPI: |
| 882 | break; |
| 883 | |
| 884 | default: |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 885 | return NOTIFY_DONE; |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 886 | } |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 887 | |
| 888 | regs = args->regs; |
| 889 | |
| 890 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 891 | ret = __smp_perf_counter_interrupt(regs, 1); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 892 | |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 893 | return ret ? NOTIFY_STOP : NOTIFY_OK; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 894 | } |
| 895 | |
| 896 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { |
Mike Galbraith | 5b75af0 | 2009-02-04 17:11:34 +0100 | [diff] [blame] | 897 | .notifier_call = perf_counter_nmi_handler, |
| 898 | .next = NULL, |
| 899 | .priority = 1 |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 900 | }; |
| 901 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 902 | static struct pmc_x86_ops pmc_intel_ops = { |
| 903 | .save_disable_all = pmc_intel_save_disable_all, |
| 904 | .restore_all = pmc_intel_restore_all, |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 905 | .get_status = pmc_intel_get_status, |
| 906 | .ack_status = pmc_intel_ack_status, |
| 907 | .enable = pmc_intel_enable, |
| 908 | .disable = pmc_intel_disable, |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 909 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, |
| 910 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, |
| 911 | .event_map = pmc_intel_event_map, |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 912 | .raw_event = pmc_intel_raw_event, |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 913 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), |
| 914 | }; |
| 915 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 916 | static struct pmc_x86_ops pmc_amd_ops = { |
| 917 | .save_disable_all = pmc_amd_save_disable_all, |
| 918 | .restore_all = pmc_amd_restore_all, |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 919 | .get_status = pmc_amd_get_status, |
| 920 | .ack_status = pmc_amd_ack_status, |
| 921 | .enable = pmc_amd_enable, |
| 922 | .disable = pmc_amd_disable, |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 923 | .eventsel = MSR_K7_EVNTSEL0, |
| 924 | .perfctr = MSR_K7_PERFCTR0, |
| 925 | .event_map = pmc_amd_event_map, |
Peter Zijlstra | b0f3f28 | 2009-03-05 18:08:27 +0100 | [diff] [blame] | 926 | .raw_event = pmc_amd_raw_event, |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 927 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
| 928 | }; |
| 929 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 930 | static struct pmc_x86_ops *pmc_intel_init(void) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 931 | { |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 932 | union cpuid10_edx edx; |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 933 | union cpuid10_eax eax; |
| 934 | unsigned int unused; |
| 935 | unsigned int ebx; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 936 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 937 | /* |
 | 938 | * Check whether the Architectural PerfMon supports the |
 | 939 | * Branch Misses Retired event: |
| 940 | */ |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 941 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 942 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 943 | return NULL; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 944 | |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 945 | intel_perfmon_version = eax.split.version_id; |
| 946 | if (intel_perfmon_version < 2) |
| 947 | return NULL; |
| 948 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 949 | pr_info("Intel Performance Monitoring support detected.\n"); |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 950 | pr_info("... version: %d\n", intel_perfmon_version); |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 951 | pr_info("... bit width: %d\n", eax.split.bit_width); |
| 952 | pr_info("... mask length: %d\n", eax.split.mask_length); |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 953 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 954 | nr_counters_generic = eax.split.num_counters; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 955 | nr_counters_fixed = edx.split.num_counters_fixed; |
| 956 | counter_value_mask = (1ULL << eax.split.bit_width) - 1; |
| 957 | |
| 958 | return &pmc_intel_ops; |
| 959 | } |
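/*
 * CPUID leaf 0xA is the architectural perfmon enumeration leaf: EAX
 * carries the perfmon version, the number of generic counters, their
 * bit width and the event mask length, while EDX describes the
 * fixed-function counters.  Versions below 2 lack the global
 * ctrl/status/overflow MSRs this code relies on, hence the early
 * return above.
 */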
| 960 | |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 961 | static struct pmc_x86_ops *pmc_amd_init(void) |
| 962 | { |
| 963 | nr_counters_generic = 4; |
| 964 | nr_counters_fixed = 0; |
Peter Zijlstra | b5e8acf | 2009-03-05 20:34:21 +0100 | [diff] [blame] | 965 | counter_value_mask = 0x0000FFFFFFFFFFFFULL; |
| 966 | counter_value_bits = 48; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 967 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 968 | pr_info("AMD Performance Monitoring support detected.\n"); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 969 | |
| 970 | return &pmc_amd_ops; |
| 971 | } |
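/*
 * AMD CPUs of this era have no perfmon enumeration leaf to query, so
 * the K7/K8 values are hard-coded: four generic 48-bit counters, no
 * fixed-function counters and no global control MSR.
 */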
| 972 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 973 | void __init init_hw_perf_counters(void) |
| 974 | { |
| 975 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
| 976 | return; |
| 977 | |
| 978 | switch (boot_cpu_data.x86_vendor) { |
| 979 | case X86_VENDOR_INTEL: |
| 980 | pmc_ops = pmc_intel_init(); |
| 981 | break; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 982 | case X86_VENDOR_AMD: |
| 983 | pmc_ops = pmc_amd_init(); |
| 984 | break; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 985 | } |
| 986 | if (!pmc_ops) |
| 987 | return; |
| 988 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 989 | pr_info("... num counters: %d\n", nr_counters_generic); |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 990 | if (nr_counters_generic > X86_PMC_MAX_GENERIC) { |
| 991 | nr_counters_generic = X86_PMC_MAX_GENERIC; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 992 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 993 | nr_counters_generic, X86_PMC_MAX_GENERIC); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 994 | } |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 995 | perf_counter_mask = (1 << nr_counters_generic) - 1; |
| 996 | perf_max_counters = nr_counters_generic; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 997 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 998 | pr_info("... value mask: %016Lx\n", counter_value_mask); |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 999 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 1000 | if (nr_counters_fixed > X86_PMC_MAX_FIXED) { |
| 1001 | nr_counters_fixed = X86_PMC_MAX_FIXED; |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 1002 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 1003 | nr_counters_fixed, X86_PMC_MAX_FIXED); |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 1004 | } |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 1005 | pr_info("... fixed counters: %d\n", nr_counters_fixed); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1006 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 1007 | perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED; |
| 1008 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 1009 | pr_info("... counter mask: %016Lx\n", perf_counter_mask); |
Ingo Molnar | 75f224c | 2008-12-14 21:58:46 +0100 | [diff] [blame] | 1010 | perf_counters_initialized = true; |
| 1011 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1012 | perf_counters_lapic_init(0); |
| 1013 | register_die_notifier(&perf_counter_nmi_notifier); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1014 | } |
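/*
 * Resulting perf_counter_mask layout, as a worked example with
 * hypothetical numbers (4 generic and 3 fixed counters, assuming
 * X86_PMC_IDX_FIXED is 32 as in the asm/perf_counter.h of this era):
 *
 *   generic counters: bits  0..3   ->  0x000000000000000f
 *   fixed counters:   bits 32..34  ->  0x0000000700000000
 *   perf_counter_mask               =  0x000000070000000f
 */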
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1015 | |
Ingo Molnar | eb2b861 | 2008-12-17 09:09:13 +0100 | [diff] [blame] | 1016 | static void pmc_generic_read(struct perf_counter *counter) |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 1017 | { |
| 1018 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); |
| 1019 | } |
| 1020 | |
Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 1021 | static const struct hw_perf_counter_ops x86_perf_counter_ops = { |
Ingo Molnar | 7671581 | 2008-12-17 14:20:28 +0100 | [diff] [blame] | 1022 | .enable = pmc_generic_enable, |
| 1023 | .disable = pmc_generic_disable, |
| 1024 | .read = pmc_generic_read, |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1025 | }; |
| 1026 | |
Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 1027 | const struct hw_perf_counter_ops * |
| 1028 | hw_perf_counter_init(struct perf_counter *counter) |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1029 | { |
| 1030 | int err; |
| 1031 | |
| 1032 | err = __hw_perf_counter_init(counter); |
| 1033 | if (err) |
Peter Zijlstra | 9ea98e1 | 2009-03-30 19:07:09 +0200 | [diff] [blame] | 1034 | return ERR_PTR(err); |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1035 | |
| 1036 | return &x86_perf_counter_ops; |
| 1037 | } |
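/*
 * A rough picture of how the core uses the ops returned here (the generic
 * code lives in kernel/perf_counter.c; sketch only, call sites assumed):
 *
 *   counter->hw_ops = hw_perf_counter_init(counter);
 *   ...
 *   counter->hw_ops->enable(counter);    /* on sched-in of the context  */
 *   counter->hw_ops->read(counter);      /* sync hw count into counter  */
 *   counter->hw_ops->disable(counter);   /* on sched-out                */
 */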
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1038 | |
| 1039 | /* |
| 1040 | * Callchain support: capture kernel and user stack backtraces for samples |
| 1041 | */ |
| 1042 | |
| 1043 | static inline |
| 1044 | void callchain_store(struct perf_callchain_entry *entry, unsigned long ip) |
| 1045 | { |
| 1046 | if (entry->nr < MAX_STACK_DEPTH) |
| 1047 | entry->ip[entry->nr++] = ip; |
| 1048 | } |
| 1049 | |
| 1050 | static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); |
| 1051 | static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); |
| 1052 | |
| 1053 | |
| 1054 | static void |
| 1055 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) |
| 1056 | { |
| 1057 | /* Ignore warnings */ |
| 1058 | } |
| 1059 | |
| 1060 | static void backtrace_warning(void *data, char *msg) |
| 1061 | { |
| 1062 | /* Ignore warnings */ |
| 1063 | } |
| 1064 | |
| 1065 | static int backtrace_stack(void *data, char *name) |
| 1066 | { |
| 1067 | /* Don't bother with IRQ stacks for now */ |
| 1068 | return -1; |
| 1069 | } |
| 1070 | |
| 1071 | static void backtrace_address(void *data, unsigned long addr, int reliable) |
| 1072 | { |
| 1073 | struct perf_callchain_entry *entry = data; |
| 1074 | |
| 1075 | if (reliable) |
| 1076 | callchain_store(entry, addr); |
| 1077 | } |
| 1078 | |
| 1079 | static const struct stacktrace_ops backtrace_ops = { |
| 1080 | .warning = backtrace_warning, |
| 1081 | .warning_symbol = backtrace_warning_symbol, |
| 1082 | .stack = backtrace_stack, |
| 1083 | .address = backtrace_address, |
| 1084 | }; |
| 1085 | |
| 1086 | static void |
| 1087 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1088 | { |
| 1089 | unsigned long bp; |
| 1090 | char *stack; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame^] | 1091 | int nr = entry->nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1092 | |
| 1093 | callchain_store(entry, instruction_pointer(regs)); |
| 1094 | |
| 1095 | stack = ((char *)regs + sizeof(struct pt_regs)); |
| 1096 | #ifdef CONFIG_FRAME_POINTER |
| 1097 | bp = frame_pointer(regs); |
| 1098 | #else |
| 1099 | bp = 0; |
| 1100 | #endif |
| 1101 | |
| 1102 | dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry); |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame^] | 1103 | |
| 1104 | entry->kernel = entry->nr - nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1105 | } |
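/*
 * entry->kernel records how many frames the kernel-side walk above
 * appended (entry->nr - nr), so consumers of the callchain can tell the
 * kernel part from the user part filled in further below.
 */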
| 1106 | |
| 1107 | |
| 1108 | struct stack_frame { |
| 1109 | const void __user *next_fp; |
| 1110 | unsigned long return_address; |
| 1111 | }; |
| 1112 | |
| 1113 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) |
| 1114 | { |
| 1115 | int ret; |
| 1116 | |
| 1117 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) |
| 1118 | return 0; |
| 1119 | |
| 1120 | ret = 1; |
| 1121 | pagefault_disable(); |
| 1122 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) |
| 1123 | ret = 0; |
| 1124 | pagefault_enable(); |
| 1125 | |
| 1126 | return ret; |
| 1127 | } |
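/*
 * The user stack may be unmapped or paged out and this path can run from
 * NMI context, so the copy is done with pagefaults disabled and the
 * non-faulting __copy_from_user_inatomic(): a failed copy simply ends
 * the callchain walk instead of sleeping on a fault.
 */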
| 1128 | |
| 1129 | static void |
| 1130 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1131 | { |
| 1132 | struct stack_frame frame; |
| 1133 | const void __user *fp; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame^] | 1134 | int nr = entry->nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1135 | |
| 1136 | regs = (struct pt_regs *)current->thread.sp0 - 1; |
| 1137 | fp = (void __user *)regs->bp; |
| 1138 | |
| 1139 | callchain_store(entry, regs->ip); |
| 1140 | |
| 1141 | while (entry->nr < MAX_STACK_DEPTH) { |
| 1142 | frame.next_fp = NULL; |
| 1143 | frame.return_address = 0; |
| 1144 | |
| 1145 | if (!copy_stack_frame(fp, &frame)) |
| 1146 | break; |
| 1147 | |
| 1148 | if ((unsigned long)fp < user_stack_pointer(regs)) |
| 1149 | break; |
| 1150 | |
| 1151 | callchain_store(entry, frame.return_address); |
| 1152 | fp = frame.next_fp; |
| 1153 | } |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame^] | 1154 | |
| 1155 | entry->user = entry->nr - nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1156 | } |
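/*
 * Note: this walk assumes the user binary keeps frame pointers, with each
 * frame starting at the saved frame pointer followed by the return address
 * (mirrored by struct stack_frame above). The comparison against
 * user_stack_pointer() is a sanity check that the chain only moves upward
 * through the user stack.
 */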
| 1157 | |
| 1158 | static void |
| 1159 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1160 | { |
| 1161 | int is_user; |
| 1162 | |
| 1163 | if (!regs) |
| 1164 | return; |
| 1165 | |
| 1166 | is_user = user_mode(regs); |
| 1167 | |
| 1168 | if (!current || current->pid == 0) |
| 1169 | return; |
| 1170 | |
| 1171 | if (is_user && current->state != TASK_RUNNING) |
| 1172 | return; |
| 1173 | |
| 1174 | if (!is_user) |
| 1175 | perf_callchain_kernel(regs, entry); |
| 1176 | |
| 1177 | if (current->mm) |
| 1178 | perf_callchain_user(regs, entry); |
| 1179 | } |
| 1180 | |
| 1181 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) |
| 1182 | { |
| 1183 | struct perf_callchain_entry *entry; |
| 1184 | |
| 1185 | if (in_nmi()) |
| 1186 | entry = &__get_cpu_var(nmi_entry); |
| 1187 | else |
| 1188 | entry = &__get_cpu_var(irq_entry); |
| 1189 | |
| 1190 | entry->nr = 0; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame^] | 1191 | entry->hv = 0; |
| 1192 | entry->kernel = 0; |
| 1193 | entry->user = 0; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1194 | |
| 1195 | perf_do_callchain(regs, entry); |
| 1196 | |
| 1197 | return entry; |
| 1198 | } |
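/*
 * Usage sketch (assumed caller, in the generic perf code): the
 * sampling/overflow path calls perf_callchain(regs) and copies the
 * returned per-cpu entry into the sample record before the next interrupt
 * on this CPU can reuse it. Separate nmi_entry/irq_entry buffers keep an
 * NMI from clobbering an in-flight irq-context chain.
 */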