/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2009 Jaswinder Singh Rajput
 *  Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int		nr_counters_generic __read_mostly;
static u64		perf_counter_mask __read_mostly;
static u64		counter_value_mask __read_mostly;
static int		counter_value_bits __read_mostly;

static int		nr_counters_fixed __read_mostly;

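/*
 * Per-CPU bookkeeping for the PMU: which counters are scheduled on this
 * CPU, which generic/fixed slots are in use, and the per-CPU interrupt
 * throttle state.
 */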
struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	u64			throttle_ctrl;
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	int		(*handle_irq)(struct pt_regs *, int);
	u64		(*save_disable_all)(void);
	void		(*restore_all)(u64);
	void		(*enable)(int, u64);
	void		(*disable)(int, u64);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
};

static struct x86_pmu *x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

static __read_mostly int intel_perfmon_version;

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * The elapsed delta is added to the counter and subtracted from
 * the remaining period.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

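/*
 * Reserve the performance counter and event-select MSRs for exclusive
 * use, temporarily disabling the lapic NMI watchdog so it does not
 * compete for the same hardware. Undone by release_pmc_hardware().
 */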
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu->eventsel + i);

	i = nr_counters_generic;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu->perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < nr_counters_generic; i++) {
		release_perfctr_nmi(x86_pmu->perfctr + i);
		release_evntsel_nmi(x86_pmu->eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

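/*
 * Called when the last reference to a counter goes away: once the
 * global counter count drops to zero, drop the hardware reservation.
 */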
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Set up the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	/* disable temporarily */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -ENOSYS;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period = hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
			hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw hw_event types provide the config directly in the
	 * event structure:
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu->max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

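/*
 * Globally disable all counters on Intel CPUs by clearing
 * MSR_CORE_PERF_GLOBAL_CTRL, returning the previous value so it
 * can be restored later.
 */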
static u64 intel_pmu_save_disable_all(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	return ctrl;
}

static u64 amd_pmu_save_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int enabled, idx;

	enabled = cpuc->enabled;
	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

	return enabled;
}

u64 hw_perf_save_disable(void)
{
	if (unlikely(!perf_counters_initialized))
		return 0;

	return x86_pmu->save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void intel_pmu_restore_all(u64 ctrl)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void amd_pmu_restore_all(u64 ctrl)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	cpuc->enabled = ctrl;
	barrier();
	if (!ctrl)
		return;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline u64 intel_pmu_get_status(u64 mask)
{
	u64 status;

	if (unlikely(!perf_counters_initialized))
		return 0;
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_enable_counter(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
			config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static void amd_pmu_enable_counter(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	set_bit(idx, cpuc->active_mask);
	if (cpuc->enabled)
		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_enable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->enable(idx, config);
}

static void intel_pmu_disable_counter(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}

static void amd_pmu_disable_counter(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	clear_bit(idx, cpuc->active_mask);
	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_disable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->disable(idx, config);
}

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
		    struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__x86_pmu_disable(struct perf_counter *counter,
		  struct hw_perf_counter *hwc, unsigned int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_disable(counter, hwc, idx);
	else
		hw_perf_disable(idx, hwc->config);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & counter_value_mask);
}

static inline void
__pmc_fixed_enable(struct perf_counter *counter,
		   struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void
__x86_pmu_enable(struct perf_counter *counter,
		 struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_enable(counter, hwc, idx);
	else
		hw_perf_enable(idx, hwc->config);
}

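/*
 * Map a generic hw_event onto one of the Intel fixed-purpose counters,
 * or return -1 if the event has to go onto a generic counter.
 */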
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
			if (idx == nr_counters_generic)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu->eventsel;
		hwc->counter_base = x86_pmu->perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	__x86_pmu_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;
	/*
	 * Make it visible before enabling the hw:
	 */
	barrier();

	x86_perf_counter_set_period(counter, hwc, idx);
	__x86_pmu_enable(counter, hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	if (!nr_counters_generic)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (intel_perfmon_version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < nr_counters_generic; idx++) {
		rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu->perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < nr_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

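/*
 * Remove a counter from the PMU: disable it in hardware, free its
 * slot and fold the remaining hardware delta into the counter.
 */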
static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__x86_pmu_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;
	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		__x86_pmu_enable(counter, hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = hw_perf_save_disable();

	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (!status)
		goto out;

	ret = 1;
again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			__x86_pmu_disable(counter, &counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		hw_perf_restore(cpuc->throttle_ctrl);

	return ret;
}

static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }

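/*
 * Undo PMU throttling: if this CPU hit the per-tick interrupt limit,
 * restore the saved PMU state and reset the interrupt count.
 */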
void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	if (unlikely(!perf_counters_initialized))
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		hw_perf_restore(cpuc->throttle_ctrl);
	}
	cpuc->interrupts = 0;
}

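/*
 * Entry point for the local APIC performance counter vector: re-arm
 * the LVT entry and hand off to the active PMU's interrupt handler.
 */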
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu->handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

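/*
 * NMI die-notifier callback: route perf counter NMIs to the PMU
 * interrupt handler and tell the NMI core whether we handled them.
 */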
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int ret;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	ret = x86_pmu->handle_irq(regs, 1);

	return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.save_disable_all	= intel_pmu_save_disable_all,
	.restore_all		= intel_pmu_restore_all,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
};

static struct x86_pmu amd_pmu = {
	.handle_irq		= amd_pmu_handle_irq,
	.save_disable_all	= amd_pmu_save_disable_all,
	.restore_all		= amd_pmu_restore_all,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
};

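/*
 * Probe for architectural perfmon via CPUID leaf 0xA and fill in the
 * counter geometry; returns NULL when version 2+ is not available.
 */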
static struct x86_pmu *intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return NULL;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return NULL;

	intel_perfmon_version = eax.split.version_id;
	if (intel_perfmon_version < 2)
		return NULL;

	pr_info("Intel Performance Monitoring support detected.\n");
	pr_info("... version:         %d\n", intel_perfmon_version);
	pr_info("... bit width:       %d\n", eax.split.bit_width);
	pr_info("... mask length:     %d\n", eax.split.mask_length);

	nr_counters_generic = eax.split.num_counters;
	nr_counters_fixed = edx.split.num_counters_fixed;
	counter_value_mask = (1ULL << eax.split.bit_width) - 1;

	return &intel_pmu;
}

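/*
 * The AMD PMU geometry is hard-coded: four generic counters, 48 bits
 * wide, and no fixed-purpose counters.
 */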
static struct x86_pmu *amd_pmu_init(void)
{
	nr_counters_generic = 4;
	nr_counters_fixed = 0;
	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
	counter_value_bits = 48;

	pr_info("AMD Performance Monitoring support detected.\n");

	return &amd_pmu;
}

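/*
 * Select the vendor PMU implementation at boot, clip the counter
 * counts to what the generic layer can handle, and register the
 * APIC/NMI plumbing.
 */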
void __init init_hw_perf_counters(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		x86_pmu = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		x86_pmu = amd_pmu_init();
		break;
	default:
		return;
	}
	if (!x86_pmu)
		return;

	pr_info("... num counters:    %d\n", nr_counters_generic);
	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
		nr_counters_generic = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_counters_generic, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << nr_counters_generic) - 1;
	perf_max_counters = nr_counters_generic;

	pr_info("... value mask:      %016Lx\n", counter_value_mask);

	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
			nr_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters:  %d\n", nr_counters_fixed);

	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);
	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

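/*
 * Safely copy one stack frame from user space; returns 0 if the
 * frame could not be read (bad pointer or fault).
 */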
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

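/*
 * Walk the user-space stack via the frame-pointer chain, recording
 * the return addresses into the callchain entry.
 */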
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}