/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2009 Jaswinder Singh Rajput
 * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	u64			throttle_ctrl;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	u64		(*save_disable_all)(void);
	void		(*restore_all)(u64);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

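/*
 * Raw events pass the vendor's event-select encoding through unchanged.
 * On Intel, bits 0-7 are the event select and bits 8-15 the unit mask;
 * e.g. the 0x412e cache-miss entry above is event 0x2e with unit mask 0x41.
 */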
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
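	/*
	 * Example: prev_raw_count == 0xfffffff0 and new_raw_count == 0x10
	 * gives (s32)0x10 - (s32)0xfffffff0 = 32, i.e. the 32 events that
	 * happened across the 32-bit wrap-around.
	 */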
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Set up the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period = hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
			hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

static u64 intel_pmu_save_disable_all(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	return ctrl;
}

static u64 amd_pmu_save_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int enabled, idx;

	enabled = cpuc->enabled;
	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

	return enabled;
}

u64 hw_perf_save_disable(void)
{
	if (!x86_pmu_initialized())
		return 0;
	return x86_pmu.save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void intel_pmu_restore_all(u64 ctrl)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void amd_pmu_restore_all(u64 ctrl)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	cpuc->enabled = ctrl;
	barrier();
	if (!ctrl)
		return;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_restore(u64 ctrl)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline u64 intel_pmu_get_status(u64 mask)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

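/*
 * Writing a bit to MSR_CORE_PERF_GLOBAL_OVF_CTRL clears the corresponding
 * overflow bit in MSR_CORE_PERF_GLOBAL_STATUS, acknowledging the PMI.
 */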
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

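/*
 * The generic enable/disable helpers just rewrite the counter's event
 * select MSR (config_base + idx) with or without the ENABLE bit set.
 */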
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
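	/*
	 * The counter is programmed with the negated period, e.g. for
	 * left == 10000 the hardware counts up from -10000 and overflows
	 * (raising the PMI) after exactly 10000 events.
	 */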
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
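	/*
	 * Each fixed counter owns a 4-bit field in FIXED_CTR_CTRL, at
	 * bit position idx * 4: e.g. fixed counter 1 is controlled by
	 * bits 4-7.
	 */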
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

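/*
 * Map a generic counter onto one of the Intel fixed-function counters
 * (instructions retired, cpu cycles, bus cycles), or return -1 so that
 * a generic PMC gets used instead.
 */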
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
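/*
 * E.g. with HZ=1000 this allows at most 100 counter interrupts between
 * two perf_counter_unthrottle() calls before throttling kicks in.
 */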

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = intel_pmu_save_disable_all();

	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (!status)
		goto out;

	ret = 1;
again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active))
			continue;

		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		intel_pmu_restore_all(cpuc->throttle_ctrl);

	return ret;
}

static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int cpu = smp_processor_id();
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	u64 val;
	int handled = 0;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	int idx;

	++cpuc->interrupts;
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active))
			continue;
		counter = cpuc->counters[idx];
		hwc = &counter->hw;
		x86_perf_counter_update(counter, hwc, idx);
		val = atomic64_read(&hwc->prev_count);
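		/*
		 * The counter was programmed with a negative value (see
		 * x86_perf_counter_set_period); while the top bit is still
		 * set it has not yet crossed zero, i.e. no overflow.
		 */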
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;
		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
			/*
			 * do not reenable when throttled, but reload
			 * the register
			 */
			amd_pmu_disable_counter(hwc, idx);
		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
			amd_pmu_enable_counter(hwc, idx);
	}
	return handled;
}

void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!x86_pmu_initialized())
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		hw_perf_restore(cpuc->throttle_ctrl);
	}
	cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!x86_pmu_initialized())
		return;

	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int ret;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	ret = x86_pmu.handle_irq(regs, 1);

	return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.save_disable_all	= intel_pmu_save_disable_all,
	.restore_all		= intel_pmu_restore_all,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
};

static struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.save_disable_all	= amd_pmu_save_disable_all,
	.restore_all		= amd_pmu_restore_all,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 4,
	.counter_bits		= 48,
	.counter_mask		= (1ULL << 48) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version:         %d\n", x86_pmu.version);
	pr_info("... bit width:       %d\n", x86_pmu.counter_bits);

	pr_info("... num counters:    %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters:  %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

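/*
 * Layout of a user-space stack frame as created by a frame-pointer based
 * function prologue: the saved previous frame pointer, followed by the
 * return address. Walking next_fp follows the caller chain.
 */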
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}