/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2009 Jaswinder Singh Rajput
 * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;
static int counter_value_bits __read_mostly;

static int nr_counters_fixed __read_mostly;

struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           interrupts;
        u64                     throttle_ctrl;
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        int             (*handle_irq)(struct pt_regs *, int);
        u64             (*save_disable_all)(void);
        void            (*restore_all)(u64);
        void            (*enable)(int, u64);
        void            (*disable)(int, u64);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
        u64             (*raw_event)(u64);
        int             max_events;
};

static struct x86_pmu *x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
        .enabled = 1,
};

static __read_mostly int intel_perfmon_version;

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};

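/*
 * Illustrative decoding of the map above: each entry is the low 16 bits of
 * an EVTSEL value, i.e. (unit mask << 8) | event select. For example,
 * 0x412e selects event 0x2e with umask 0x41 ("LLC Misses" in the Intel
 * SDM's architectural events), and 0x4f2e is the same event 0x2e with
 * umask 0x4f ("LLC References").
 */
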
static u64 intel_pmu_event_map(int event)
{
        return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK       0xFF000000ULL

#define CORE_EVNTSEL_MASK               \
        (CORE_EVNTSEL_EVENT_MASK |      \
         CORE_EVNTSEL_UNIT_MASK  |      \
         CORE_EVNTSEL_COUNTER_MASK)

        return event & CORE_EVNTSEL_MASK;
}

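/*
 * Worked example of the filtering above (illustrative values):
 * CORE_EVNTSEL_MASK is 0xFF00FFFF, so a raw config of 0x0041412e is
 * filtered to 0x0000412e - the event select (0x2e), unit mask (0x41) and
 * cmask bits survive, while bits 16-23 (USR, OS, INT and notably ENABLE,
 * bit 22) are cleared, so user-supplied raw events can never flip the
 * control bits themselves.
 */
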
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x0076,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x0080,
        [PERF_COUNT_CACHE_MISSES]               = 0x0081,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
        return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL

#define K7_EVNTSEL_MASK                 \
        (K7_EVNTSEL_EVENT_MASK |        \
         K7_EVNTSEL_UNIT_MASK  |        \
         K7_EVNTSEL_COUNTER_MASK)

        return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * The delta of events processed is added to counter->count and
 * subtracted from hwc->period_left.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}

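/*
 * A quick sanity check of the 32-bit clipping above (illustrative
 * numbers): if prev_raw_count was 0xfffffff0 and the counter has since
 * wrapped to a new_raw_count of 0x00000010, then
 * (s32)0x10 - (s32)0xfffffff0 = 16 - (-16) = 0x20, so 32 events are
 * accounted - the wraparound is absorbed by the 32-bit subtraction
 * instead of producing a huge bogus delta.
 */
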
static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
        int i;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                disable_lapic_nmi_watchdog();

        for (i = 0; i < nr_counters_generic; i++) {
                if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
                        goto perfctr_fail;
        }

        for (i = 0; i < nr_counters_generic; i++) {
                if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu->eventsel + i);

        i = nr_counters_generic;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu->perfctr + i);

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < nr_counters_generic; i++) {
                release_perfctr_nmi(x86_pmu->perfctr + i);
                release_evntsel_nmi(x86_pmu->eventsel + i);
        }

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
        if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

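/*
 * Lifecycle sketch of the reservation above (assuming the usual counting
 * semantics of atomic_dec_and_mutex_lock): num_counters goes 0 -> 1 when
 * the first counter reserves the eventsel/perfctr MSRs, each further
 * counter just bumps the count, and only the destroy that drops it back
 * to 0 takes pmc_reserve_mutex and releases the hardware, re-enabling the
 * lapic NMI watchdog if it was in use.
 */
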
/*
 * Set up the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;
        int err;

        /* disable temporarily */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return -ENOSYS;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        err = 0;
        if (!atomic_inc_not_zero(&num_counters)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
                        err = -EBUSY;
                else
                        atomic_inc(&num_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!hw_event->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!hw_event->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        /*
         * If privileged enough, allow NMI events:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
                hwc->nmi = 1;

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                        hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);

        /*
         * Raw event type provides the config in the event structure
         */
        if (perf_event_raw(hw_event)) {
                hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
        } else {
                if (perf_event_id(hw_event) >= x86_pmu->max_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
        }

        counter->destroy = hw_perf_counter_destroy;

        return 0;
}

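/*
 * Putting the pieces together (an illustrative trace, not extra code): a
 * non-raw PERF_COUNT_CPU_CYCLES counter that excludes nothing ends up on
 * Intel with hwc->config = 0x003c | ARCH_PERFMON_EVENTSEL_INT |
 * ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS, i.e. event 0x3c
 * counting in both rings with PMI delivery armed, while the ENABLE bit
 * stays clear until the counter is actually scheduled in.
 */
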
static u64 intel_pmu_save_disable_all(void)
{
        u64 ctrl;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}

static u64 amd_pmu_save_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int enabled, idx;

        enabled = cpuc->enabled;
        cpuc->enabled = 0;
        /*
         * ensure we write the disable before we start disabling the
         * counters proper, so that amd_pmu_enable_counter() does the
         * right thing.
         */
        barrier();

        for (idx = 0; idx < nr_counters_generic; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }

        return enabled;
}

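/*
 * Why the barrier matters (a sketch of the race it closes): without it the
 * compiler could sink the cpuc->enabled = 0 store below the EVNTSEL loop,
 * so amd_pmu_enable_counter() running on this CPU in the meantime (from an
 * NMI, say) could still observe enabled == 1 and set
 * ARCH_PERFMON_EVENTSEL0_ENABLE on a counter this function believes it has
 * just switched off.
 */
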
u64 hw_perf_save_disable(void)
{
        if (unlikely(!perf_counters_initialized))
                return 0;

        return x86_pmu->save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void intel_pmu_restore_all(u64 ctrl)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void amd_pmu_restore_all(u64 ctrl)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int idx;

        cpuc->enabled = ctrl;
        barrier();
        if (!ctrl)
                return;

        for (idx = 0; idx < nr_counters_generic; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
                        continue;
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
}

void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        x86_pmu->restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline u64 intel_pmu_get_status(u64 mask)
{
        u64 status;

        if (unlikely(!perf_counters_initialized))
                return 0;
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_enable_counter(int idx, u64 config)
{
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
               config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static void amd_pmu_enable_counter(int idx, u64 config)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        set_bit(idx, cpuc->active_mask);
        if (cpuc->enabled)
                config |= ARCH_PERFMON_EVENTSEL0_ENABLE;

        wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_enable(int idx, u64 config)
{
        if (unlikely(!perf_counters_initialized))
                return;

        x86_pmu->enable(idx, config);
}

static void intel_pmu_disable_counter(int idx, u64 config)
{
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}

static void amd_pmu_disable_counter(int idx, u64 config)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        clear_bit(idx, cpuc->active_mask);
        wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_disable(int idx, u64 config)
{
        if (unlikely(!perf_counters_initialized))
                return;

        x86_pmu->disable(idx, config);
}

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__x86_pmu_disable(struct perf_counter *counter,
                  struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                hw_perf_disable(idx, hwc->config);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
                            struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}

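/*
 * Worked example of the programming above (illustrative numbers): with
 * irq_period = 100000 and a fully replenished period_left, the counter MSR
 * is written with (u64)(-100000) & counter_value_mask, i.e. 100000 below
 * the overflow point. The hardware then counts upward and raises the PMI
 * after exactly 100000 events, and prev_count = (u64)-100000 lets
 * x86_perf_counter_update() recover the delta afterwards.
 */
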
static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

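/*
 * FIXED_CTR_CTRL layout sketch: each fixed counter owns a 4-bit field at
 * bit position (idx * 4). Enabling PMI plus ring-0 and ring-3 counting
 * for fixed counter 1, for instance, computes bits = 0x8 | 0x2 | 0x1 =
 * 0xb, shifts it to 0xb0, and the mask 0xf0 clears the old field first so
 * the neighbouring counters' fields are untouched.
 */
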
static void
__x86_pmu_enable(struct perf_counter *counter,
                 struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                hw_perf_enable(idx, hwc->config);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return -1;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}

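/*
 * Example of the matching above: on Intel an instructions counter carries
 * event code 0x00c0, which equals
 * intel_pmu_event_map(PERF_COUNT_INSTRUCTIONS), so it is steered to the
 * INST_RETIRED.ANY fixed-purpose counter; anything without a fixed
 * equivalent (or any NMI counter) gets -1 and competes for a generic PMC
 * instead.
 */
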
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base = x86_pmu->eventsel;
                hwc->counter_base = x86_pmu->perfctr;
        }

        perf_counters_lapic_init(hwc->nmi);

        __x86_pmu_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        barrier();

        x86_perf_counter_set_period(counter, hwc, idx);
        __x86_pmu_enable(counter, hwc, idx);

        return 0;
}

void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        if (intel_perfmon_version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
        }
        pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
                rdmsrl(x86_pmu->perfctr + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d: gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}

static void x86_pmu_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __x86_pmu_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        barrier();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        x86_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __x86_pmu_enable(counter, hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

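/*
 * Throttle arithmetic sketch: cpuc->interrupts is reset once per timer
 * tick in perf_counter_unthrottle(), so with HZ = 1000 the limit works
 * out to 100000/1000 = 100 PMIs per tick, i.e. roughly 100 KHz per CPU;
 * past that the handler stops re-enabling the PMU until the next tick.
 */
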
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
        int ret = 0;

        cpuc->throttle_ctrl = hw_perf_save_disable();

        status = intel_pmu_get_status(cpuc->throttle_ctrl);
        if (!status)
                goto out;

        ret = 1;
again:
        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *)&status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);
                if (perf_counter_overflow(counter, nmi, regs, 0))
                        __x86_pmu_disable(counter, &counter->hw, bit);
        }

        intel_pmu_ack_status(ack);

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status(cpuc->throttle_ctrl);
        if (status)
                goto again;
out:
        /*
         * Restore - do not re-enable when global enable is off or throttled:
         */
        if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
                hw_perf_restore(cpuc->throttle_ctrl);

        return ret;
}

static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }

void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &__get_cpu_var(cpu_hw_counters);
        if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
                hw_perf_restore(cpuc->throttle_ctrl);
        }
        cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        ack_APIC_irq();
        x86_pmu->handle_irq(regs, 0);
        irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
        irq_enter();
        ack_APIC_irq();
        inc_irq_stat(apic_pending_irqs);
        perf_counter_do_pending();
        irq_exit();
}

void set_perf_counter_pending(void)
{
        apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;
        int ret;

        switch (cmd) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        ret = x86_pmu->handle_irq(regs, 1);

        return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler,
        .next                   = NULL,
        .priority               = 1
};

static struct x86_pmu intel_pmu = {
        .handle_irq             = intel_pmu_handle_irq,
        .save_disable_all       = intel_pmu_save_disable_all,
        .restore_all            = intel_pmu_restore_all,
        .enable                 = intel_pmu_enable_counter,
        .disable                = intel_pmu_disable_counter,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .raw_event              = intel_pmu_raw_event,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
};

static struct x86_pmu amd_pmu = {
        .handle_irq             = amd_pmu_handle_irq,
        .save_disable_all       = amd_pmu_save_disable_all,
        .restore_all            = amd_pmu_restore_all,
        .enable                 = amd_pmu_enable_counter,
        .disable                = amd_pmu_disable_counter,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .raw_event              = amd_pmu_raw_event,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
};

Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 916 | static struct x86_pmu *intel_pmu_init(void) |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 917 | { |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 918 | union cpuid10_edx edx; |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 919 | union cpuid10_eax eax; |
| 920 | unsigned int unused; |
| 921 | unsigned int ebx; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 922 | |
Robert Richter | da1a776 | 2009-04-29 12:46:58 +0200 | [diff] [blame] | 923 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
| 924 | return NULL; |
| 925 | |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 926 | /* |
| 927 | * Check whether the Architectural PerfMon supports |
| 928 | * the Branch Misses Retired event. |
| 929 | */ |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 930 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 931 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 932 | return NULL; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 933 | |
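| | /* |
| |  * Version 2 of the architectural PerfMon introduced the global |
| |  * control/status MSRs and the fixed-function counters that the |
| |  * Intel handlers depend on; bail out on older implementations. |
| |  */ |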
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 934 | intel_perfmon_version = eax.split.version_id; |
| 935 | if (intel_perfmon_version < 2) |
| 936 | return NULL; |
| 937 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 938 | pr_info("Intel Performance Monitoring support detected.\n"); |
Ingo Molnar | 7bb497b | 2009-03-18 08:59:21 +0100 | [diff] [blame] | 939 | pr_info("... version: %d\n", intel_perfmon_version); |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 940 | pr_info("... bit width: %d\n", eax.split.bit_width); |
| 941 | pr_info("... mask length: %d\n", eax.split.mask_length); |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 942 | |
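| | /* |
| |  * CPUID leaf 0xA: eax enumerates the generic counters and their |
| |  * bit width, edx the fixed-function counters. |
| |  */ |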
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 943 | nr_counters_generic = eax.split.num_counters; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 944 | nr_counters_fixed = edx.split.num_counters_fixed; |
| 945 | counter_value_mask = (1ULL << eax.split.bit_width) - 1; |
| 946 | |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 947 | return &intel_pmu; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 948 | } |
| 949 | |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 950 | static struct x86_pmu *amd_pmu_init(void) |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 951 | { |
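| | /* |
| |  * AMD CPUs have no CPUID leaf describing their PMU, so the K7+ |
| |  * counter count and 48-bit width are hardcoded. |
| |  */ |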
| 952 | nr_counters_generic = 4; |
| 953 | nr_counters_fixed = 0; |
Peter Zijlstra | b5e8acf | 2009-03-05 20:34:21 +0100 | [diff] [blame] | 954 | counter_value_mask = 0x0000FFFFFFFFFFFFULL; |
| 955 | counter_value_bits = 48; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 956 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 957 | pr_info("AMD Performance Monitoring support detected.\n"); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 958 | |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 959 | return &amd_pmu; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 960 | } |
| 961 | |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 962 | void __init init_hw_perf_counters(void) |
| 963 | { |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 964 | switch (boot_cpu_data.x86_vendor) { |
| 965 | case X86_VENDOR_INTEL: |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 966 | x86_pmu = intel_pmu_init(); |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 967 | break; |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 968 | case X86_VENDOR_AMD: |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 969 | x86_pmu = amd_pmu_init(); |
Jaswinder Singh Rajput | f87ad35 | 2009-02-27 20:15:14 +0530 | [diff] [blame] | 970 | break; |
Robert Richter | 4138960 | 2009-04-29 12:47:00 +0200 | [diff] [blame] | 971 | default: |
| 972 | return; |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 973 | } |
Robert Richter | 5f4ec28 | 2009-04-29 12:47:04 +0200 | [diff] [blame] | 974 | if (!x86_pmu) |
Jaswinder Singh Rajput | b56a380 | 2009-02-27 18:09:09 +0530 | [diff] [blame] | 975 | return; |
| 976 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 977 | pr_info("... num counters: %d\n", nr_counters_generic); |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 978 | if (nr_counters_generic > X86_PMC_MAX_GENERIC) { |
| 979 | nr_counters_generic = X86_PMC_MAX_GENERIC; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 980 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!\n", |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 981 | nr_counters_generic, X86_PMC_MAX_GENERIC); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 982 | } |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 983 | perf_counter_mask = (1 << nr_counters_generic) - 1; |
| 984 | perf_max_counters = nr_counters_generic; |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 985 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 986 | pr_info("... value mask: %016Lx\n", counter_value_mask); |
Ingo Molnar | 2f18d1e | 2008-12-22 11:10:42 +0100 | [diff] [blame] | 987 | |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 988 | if (nr_counters_fixed > X86_PMC_MAX_FIXED) { |
| 989 | nr_counters_fixed = X86_PMC_MAX_FIXED; |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 990 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!\n", |
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 991 | nr_counters_fixed, X86_PMC_MAX_FIXED); |
Ingo Molnar | 703e937 | 2008-12-17 10:51:15 +0100 | [diff] [blame] | 992 | } |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 993 | pr_info("... fixed counters: %d\n", nr_counters_fixed); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 994 | |
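| | /* |
| |  * The fixed-function counters occupy the bits above the generic |
| |  * ones in the global counter mask, starting at X86_PMC_IDX_FIXED. |
| |  */ |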
Ingo Molnar | 862a1a5 | 2008-12-17 13:09:20 +0100 | [diff] [blame] | 995 | perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED; |
| 996 | |
Jaswinder Singh Rajput | a1ef58f | 2009-02-28 18:45:39 +0530 | [diff] [blame] | 997 | pr_info("... counter mask: %016Lx\n", perf_counter_mask); |
Ingo Molnar | 75f224c | 2008-12-14 21:58:46 +0100 | [diff] [blame] | 998 | perf_counters_initialized = true; |
| 999 | |
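| | /* |
| |  * Program the local APIC PMI vector and hook into the NMI die |
| |  * chain so that counter overflow interrupts get serviced. |
| |  */ |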
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1000 | perf_counters_lapic_init(0); |
| 1001 | register_die_notifier(&perf_counter_nmi_notifier); |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1002 | } |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1003 | |
Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 1004 | static void x86_pmu_read(struct perf_counter *counter) |
Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 1005 | { |
| 1006 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); |
| 1007 | } |
| 1008 | |
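| | /* |
| |  * Vendor-independent ops handed back to the core perf_counter |
| |  * code. |
| |  */ |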
Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 1009 | static const struct pmu pmu = { |
| 1010 | .enable = x86_pmu_enable, |
| 1011 | .disable = x86_pmu_disable, |
| 1012 | .read = x86_pmu_read, |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1013 | }; |
| 1014 | |
Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 1015 | const struct pmu *hw_perf_counter_init(struct perf_counter *counter) |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1016 | { |
| 1017 | int err; |
| 1018 | |
| 1019 | err = __hw_perf_counter_init(counter); |
| 1020 | if (err) |
Peter Zijlstra | 9ea98e1 | 2009-03-30 19:07:09 +0200 | [diff] [blame] | 1021 | return ERR_PTR(err); |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1022 | |
Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 1023 | return &pmu; |
Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1024 | } |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1025 | |
| 1026 | /* |
| 1027 | * callchain support |
| 1028 | */ |
| 1029 | |
| 1030 | static inline |
| 1031 | void callchain_store(struct perf_callchain_entry *entry, unsigned long ip) |
| 1032 | { |
| 1033 | if (entry->nr < MAX_STACK_DEPTH) |
| 1034 | entry->ip[entry->nr++] = ip; |
| 1035 | } |
| 1036 | |
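| | /* |
| |  * Separate per-cpu buffers for IRQ and NMI context: an NMI can |
| |  * interrupt a callchain capture already in progress. |
| |  */ |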
| 1037 | static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); |
| 1038 | static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); |
| 1039 | |
| 1040 | |
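| | /* |
| |  * Callbacks handed to dump_trace(): warnings are ignored, IRQ |
| |  * stacks are skipped, and only addresses the unwinder marks as |
| |  * reliable are recorded. |
| |  */ |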
| 1041 | static void |
| 1042 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) |
| 1043 | { |
| 1044 | /* Ignore warnings */ |
| 1045 | } |
| 1046 | |
| 1047 | static void backtrace_warning(void *data, char *msg) |
| 1048 | { |
| 1049 | /* Ignore warnings */ |
| 1050 | } |
| 1051 | |
| 1052 | static int backtrace_stack(void *data, char *name) |
| 1053 | { |
| 1054 | /* Don't bother with IRQ stacks for now */ |
| 1055 | return -1; |
| 1056 | } |
| 1057 | |
| 1058 | static void backtrace_address(void *data, unsigned long addr, int reliable) |
| 1059 | { |
| 1060 | struct perf_callchain_entry *entry = data; |
| 1061 | |
| 1062 | if (reliable) |
| 1063 | callchain_store(entry, addr); |
| 1064 | } |
| 1065 | |
| 1066 | static const struct stacktrace_ops backtrace_ops = { |
| 1067 | .warning = backtrace_warning, |
| 1068 | .warning_symbol = backtrace_warning_symbol, |
| 1069 | .stack = backtrace_stack, |
| 1070 | .address = backtrace_address, |
| 1071 | }; |
| 1072 | |
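| | /* |
| |  * Record the kernel-side callchain via dump_trace(), seeded with |
| |  * the interrupted instruction pointer. Without CONFIG_FRAME_POINTER |
| |  * there is no frame chain to follow, so bp stays 0 and little |
| |  * beyond regs->ip is likely to be captured. |
| |  */ |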
| 1073 | static void |
| 1074 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1075 | { |
| 1076 | unsigned long bp; |
| 1077 | char *stack; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame] | 1078 | int nr = entry->nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1079 | |
| 1080 | callchain_store(entry, instruction_pointer(regs)); |
| 1081 | |
| 1082 | stack = ((char *)regs + sizeof(struct pt_regs)); |
| 1083 | #ifdef CONFIG_FRAME_POINTER |
| 1084 | bp = frame_pointer(regs); |
| 1085 | #else |
| 1086 | bp = 0; |
| 1087 | #endif |
| 1088 | |
| 1089 | dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry); |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame] | 1090 | |
| 1091 | entry->kernel = entry->nr - nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | |
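| | /* |
| |  * A user-space stack frame under the frame-pointer ABI: the saved |
| |  * caller frame pointer followed by the return address. |
| |  */ |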
| 1095 | struct stack_frame { |
| 1096 | const void __user *next_fp; |
| 1097 | unsigned long return_address; |
| 1098 | }; |
| 1099 | |
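| | /* |
| |  * Copy one stack frame from user memory. This may run in NMI |
| |  * context, so page faults are disabled and a faulting access |
| |  * simply fails rather than sleeping. |
| |  */ |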
| 1100 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) |
| 1101 | { |
| 1102 | int ret; |
| 1103 | |
| 1104 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) |
| 1105 | return 0; |
| 1106 | |
| 1107 | ret = 1; |
| 1108 | pagefault_disable(); |
| 1109 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) |
| 1110 | ret = 0; |
| 1111 | pagefault_enable(); |
| 1112 | |
| 1113 | return ret; |
| 1114 | } |
| 1115 | |
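| | /* |
| |  * Walk the user stack by chasing the frame-pointer chain. The |
| |  * user-mode registers saved at kernel entry (just below |
| |  * thread.sp0) provide the starting ip and bp; a frame pointer |
| |  * below the stack pointer ends the walk. |
| |  */ |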
| 1116 | static void |
| 1117 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1118 | { |
| 1119 | struct stack_frame frame; |
| 1120 | const void __user *fp; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame] | 1121 | int nr = entry->nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1122 | |
| 1123 | regs = (struct pt_regs *)current->thread.sp0 - 1; |
| 1124 | fp = (void __user *)regs->bp; |
| 1125 | |
| 1126 | callchain_store(entry, regs->ip); |
| 1127 | |
| 1128 | while (entry->nr < MAX_STACK_DEPTH) { |
| 1129 | frame.next_fp = NULL; |
| 1130 | frame.return_address = 0; |
| 1131 | |
| 1132 | if (!copy_stack_frame(fp, &frame)) |
| 1133 | break; |
| 1134 | |
| 1135 | if ((unsigned long)fp < user_stack_pointer(regs)) |
| 1136 | break; |
| 1137 | |
| 1138 | callchain_store(entry, frame.return_address); |
| 1139 | fp = frame.next_fp; |
| 1140 | } |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame] | 1141 | |
| 1142 | entry->user = entry->nr - nr; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1143 | } |
| 1144 | |
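| | /* |
| |  * Decide what to capture: nothing for the idle task or when a |
| |  * user-mode task is not running; the kernel side only when the |
| |  * regs are from kernel mode; the user side only for tasks that |
| |  * have an mm. |
| |  */ |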
| 1145 | static void |
| 1146 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) |
| 1147 | { |
| 1148 | int is_user; |
| 1149 | |
| 1150 | if (!regs) |
| 1151 | return; |
| 1152 | |
| 1153 | is_user = user_mode(regs); |
| 1154 | |
| 1155 | if (!current || current->pid == 0) |
| 1156 | return; |
| 1157 | |
| 1158 | if (is_user && current->state != TASK_RUNNING) |
| 1159 | return; |
| 1160 | |
| 1161 | if (!is_user) |
| 1162 | perf_callchain_kernel(regs, entry); |
| 1163 | |
| 1164 | if (current->mm) |
| 1165 | perf_callchain_user(regs, entry); |
| 1166 | } |
| 1167 | |
| 1168 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) |
| 1169 | { |
| 1170 | struct perf_callchain_entry *entry; |
| 1171 | |
| 1172 | if (in_nmi()) |
| 1173 | entry = &__get_cpu_var(nmi_entry); |
| 1174 | else |
| 1175 | entry = &__get_cpu_var(irq_entry); |
| 1176 | |
| 1177 | entry->nr = 0; |
Peter Zijlstra | 5872bdb8 | 2009-04-02 11:12:03 +0200 | [diff] [blame] | 1178 | entry->hv = 0; |
| 1179 | entry->kernel = 0; |
| 1180 | entry->user = 0; |
Peter Zijlstra | d7d59fb | 2009-03-30 19:07:15 +0200 | [diff] [blame] | 1181 | |
| 1182 | perf_do_callchain(regs, entry); |
| 1183 | |
| 1184 | return entry; |
| 1185 | } |