/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

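/*
 * Grab the perfctr/eventsel MSR pairs (and quiesce the lapic NMI
 * watchdog) before the first counter is created, so the PMU is not
 * shared with the watchdog while perf counters are in use.
 */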
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

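/*
 * Counter destructor: once the last active counter goes away, give
 * the PMC hardware back (and re-enable the lapic NMI watchdog).
 */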
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Set up the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period)
		hwc->sample_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * Raw event types provide the config in the event structure
	 */
	if (perf_event_raw(attr)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
	} else {
		if (perf_event_id(attr) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(attr));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

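/*
 * Intel has a single global control MSR: clearing it stops all
 * counters at once. AMD has no such register, so the enable bit is
 * cleared in each active counter's event-select MSR individually.
 */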
static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

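/*
 * The global status MSR reports which counters have overflowed;
 * writing the same bits to the OVF_CTRL MSR acknowledges them.
 */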
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

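/*
 * (Re)program a counter's event-select MSR, either with or without
 * the enable bit set.
 */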
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);

	return ret;
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

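/*
 * Map a generic hardware event onto one of the fixed-purpose
 * counters, or return -1 if it needs a generic counter.
 */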
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

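/*
 * Restart a counter that the core throttled: simply re-enable it on
 * the slot it already occupies.
 */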
static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}

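/*
 * Last-resort recovery: clear every generic and fixed counter MSR on
 * this CPU. Used when the overflow handler appears to be stuck in a
 * loop.
 */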
static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}


/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	struct cpu_hw_counters;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

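/*
 * AMD has no global overflow status register, so detect overflow by
 * re-reading each active counter and checking whether the top bit of
 * the programmed (negative) period has cleared.
 */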
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/* counter overflow */
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

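/*
 * PMU overflow interrupts arrive as NMIs; this die-notifier hands
 * them to the vendor-specific handler and then swallows the NMI.
 */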
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call	= perf_counter_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

static struct x86_pmu intel_pmu = {
	.name		= "Intel",
	.handle_irq	= intel_pmu_handle_irq,
	.disable_all	= intel_pmu_disable_all,
	.enable_all	= intel_pmu_enable_all,
	.enable		= intel_pmu_enable_counter,
	.disable	= intel_pmu_disable_counter,
	.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map	= intel_pmu_event_map,
	.raw_event	= intel_pmu_raw_event,
	.max_events	= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period	= (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name		= "AMD",
	.handle_irq	= amd_pmu_handle_irq,
	.disable_all	= amd_pmu_disable_all,
	.enable_all	= amd_pmu_enable_all,
	.enable		= amd_pmu_enable_counter,
	.disable	= amd_pmu_disable_counter,
	.eventsel	= MSR_K7_EVNTSEL0,
	.perfctr	= MSR_K7_PERFCTR0,
	.event_map	= amd_pmu_event_map,
	.raw_event	= amd_pmu_raw_event,
	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters	= 4,
	.counter_bits	= 48,
	.counter_mask	= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);

	pr_info("... num counters: %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init();
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

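/*
 * Method table handed to the generic perf_counter core for every
 * counter created on this architecture.
 */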
static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}


struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

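/*
 * Fetch one stack frame from user space; returns 0 if the frame
 * pointer does not point at a readable, resident user address.
 */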
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

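/*
 * Walk the user-space frame-pointer chain, recording return addresses
 * until a frame cannot be read, the frame pointer drops below the
 * user stack pointer, or the entry buffer fills up.
 */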
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}