/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>

#include "perf_event.h"

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta of events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
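	/*
	 * (Explanatory note: the shifts below mask off any bits above the
	 *  counter width before subtracting, so the delta is computed
	 *  modulo the physical counter width and then scaled back down,
	 *  which keeps it correct across a single counter wrap.)
	 */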
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

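	/*
	 * (Reset 'i' so that the perfctr_fail path below releases every
	 *  perfctr MSR reserved by the first loop above.)
	 */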
	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
	u64 val, val_new = 0;
	int i, reg, ret = 0;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			goto bios_fail;
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
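		/*
		 * (Each fixed counter owns a 4-bit control field in this
		 *  MSR; bits 0-1 of each field are the OS/USR enable bits,
		 *  so any set enable bit means the BIOS left the counter
		 *  running.)
		 */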
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4))
				goto bios_fail;
		}
	}

	/*
	 * Now write a value and read it back to see if it matches,
	 * this is needed to detect certain hardware emulators (qemu/kvm)
	 * that don't trap on the MSR access and always return 0s.
	 */
	val = 0xabcdUL;
	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	return true;

bios_fail:
	/*
	 * We still allow the PMU driver to operate:
	 */
	printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
	printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);

	return true;

msr_fail:
	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

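	/*
	 * (For PERF_TYPE_HW_CACHE the cache id, the operation and the
	 *  result are packed into the low three bytes of attr->config;
	 *  see the generic hw-cache event encoding in perf_event.h.)
	 */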
	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
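	/*
	 * (A branch-instructions event with a fixed sample period of 1 is
	 *  how user-space requests branch tracing via BTS, hence the
	 *  special-casing below.)
	 */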
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

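		/*
		 * (precise_ip levels map onto hardware capabilities here:
		 *  level 1 needs PEBS for constant skid, level 2 additionally
		 *  needs the LBR for IP fixup.)
		 */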
		/* Support for constant skid */
		if (x86_pmu.pebs_active) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;
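	/*
	 * (If any event failed the checks above, the fastpath loop broke
	 *  out with i < n and we fall through to a full reschedule below.)
	 */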

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -EINVAL : 0;
}

/*
 * dogrp: true if we must also collect the group's sibling events
 * returns the total number of events or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base = 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base = x86_pmu_event_addr(hwc->idx);
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

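	/*
	 * (The counter is programmed with -left, so it counts up and
	 *  overflows, raising the PMI, after exactly 'left' increments.)
	 */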
	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to an erratum on certain CPUs we need a second write to
	 * be sure the register is updated properly.
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	perf_pmu_disable(event->pmu);
	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;
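	/*
	 * (n_txn tracks how many events this transaction added, so that
	 *  ->cancel_txn can subtract them again from n_added/n_events.)
	 */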

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:   %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:    %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:     %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:   %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrl(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs might
			 * still deliver spurious interrupts that are already
			 * in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
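		/*
		 * (If the sign bit of the counter value is still set, the
		 *  counter has not wrapped, so this PMC did not raise the
		 *  overflow NMI.)
		 */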
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (!atomic_read(&active_events))
		return NMI_DONE;

	return x86_pmu.handle_irq(regs);
}

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		cpuc->kfree_on_online = NULL;
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_ONLINE:
		kfree(cpuc->kfree_on_online);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

Yinghai Ludda99112011-01-21 15:30:01 -08001120static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301121{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001122 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001123 int err;
1124
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001125 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001126
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301127 switch (boot_cpu_data.x86_vendor) {
1128 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001129 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301130 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301131 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001132 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301133 break;
Robert Richter41389602009-04-29 12:47:00 +02001134 default:
Peter Zijlstra004417a2010-11-25 18:38:29 +01001135 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301136 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001137 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001138 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001139 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001140 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301141
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001142 pmu_check_apic();
1143
Don Zickus33c6d6a2010-11-22 16:55:23 -05001144 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001145 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001146 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001147
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001148 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001149
Peter Zijlstra3c447802010-03-04 21:49:01 +01001150 if (x86_pmu.quirks)
1151 x86_pmu.quirks();
1152
Robert Richter948b1bb2010-03-29 18:36:50 +02001153 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001154 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001155 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1156 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001157 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001158 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar241771e2008-12-03 10:39:53 +01001159
Robert Richter948b1bb2010-03-29 18:36:50 +02001160 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001161 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001162 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1163 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001164 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001165
Robert Richterd6dc0b42010-03-17 12:49:13 +01001166 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001167 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001168
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001169 perf_events_lapic_init();
Don Zickus9c48f1c2011-09-30 15:06:21 -04001170 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001171
Peter Zijlstra63b14642010-01-22 16:32:17 +01001172 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001173 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1174 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001175
Peter Zijlstrab622d642010-02-01 15:36:30 +01001176 if (x86_pmu.event_constraints) {
1177 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001178 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001179 continue;
1180
Robert Richter948b1bb2010-03-29 18:36:50 +02001181 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1182 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001183 }
1184 }
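	/*
	 * The loop above widens every constraint declared with a full
	 * X86_RAW_EVENT_MASK cmask (in practice the fixed-counter
	 * constraints), so that those events may also be scheduled on any
	 * of the generic counters discovered at boot.
	 */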
1185
Ingo Molnar57c0c152009-09-21 12:20:38 +02001186 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001187 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1188 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1189 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001190 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001191 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001192 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001193
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001194 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001195 perf_cpu_notifier(x86_pmu_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001196
1197 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001198}
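/*
 * As an early initcall this runs from do_pre_smp_initcalls(), i.e. before
 * the secondary CPUs are brought online, so the PMU setup and the CPU
 * notifier registered above are in place before any other CPU comes up.
 */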
Peter Zijlstra004417a2010-11-25 18:38:29 +01001199early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001200
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001201static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001202{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001203 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001204}
1205
Lin Ming4d1c52b2010-04-23 13:56:12 +08001206/*
1207 * Start group events scheduling transaction
1208 * Set the flag to make pmu::enable() not perform the
1209 * schedulability test; it will be performed at commit time
1210 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001211static void x86_pmu_start_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001212{
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001213 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001214 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1215 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001216}
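/*
 * The generic core drives the three transaction hooks roughly as follows
 * when scheduling an event group (simplified sketch, not the literal
 * call sites):
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		pmu->add(event, PERF_EF_START);	/* no per-event check */
 *	if (pmu->commit_txn(pmu))		/* one group-wide check */
 *		pmu->cancel_txn(pmu);		/* roll the group back */
 */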
1217
1218/*
1219 * Stop group events scheduling transaction
1220 * Clear the flag and pmu::enable() will perform the
1221 * schedulability test.
1222 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001223static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001224{
Tejun Heo0a3aee02010-12-18 16:28:55 +01001225 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
Stephane Eranian90151c352010-05-25 16:23:10 +02001226 /*
1227 * Truncate the collected events.
1228 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001229 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1230 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
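	/*
	 * n_txn counts the events added via x86_pmu_add() since
	 * start_txn(), so subtracting it from n_added and n_events drops
	 * the half-built group without walking the event list.
	 */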
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001231 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001232}
1233
1234/*
1235 * Commit group events scheduling transaction
1236 * Perform the group schedulability test as a whole
1237 * Return 0 on success
1238 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001239static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001240{
1241 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1242 int assign[X86_PMC_IDX_MAX];
1243 int n, ret;
1244
1245 n = cpuc->n_events;
1246
1247 if (!x86_pmu_initialized())
1248 return -EAGAIN;
1249
1250 ret = x86_pmu.schedule_events(cpuc, n, assign);
1251 if (ret)
1252 return ret;
1253
1254 /*
1255 * copy the new assignment now that we know it is possible;
1256 * it will be used by hw_perf_enable()
1257 */
1258 memcpy(cpuc->assign, assign, n*sizeof(int));
1259
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001260 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001261 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001262 return 0;
1263}
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001264/*
1265 * a fake_cpuc is used to validate event groups. Due to
1266 * the extra reg logic, we also need to allocate a fake
1267 * per_core and per_cpu structure. Otherwise, group events
1268 * using an extra reg may conflict without the kernel being
1269 * able to catch this when the last event gets added to
1270 * the group.
1271 */
1272static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1273{
1274 kfree(cpuc->shared_regs);
1275 kfree(cpuc);
1276}
1277
1278static struct cpu_hw_events *allocate_fake_cpuc(void)
1279{
1280 struct cpu_hw_events *cpuc;
1281 int cpu = raw_smp_processor_id();
1282
1283 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1284 if (!cpuc)
1285 return ERR_PTR(-ENOMEM);
1286
1287 /* only needed if we have extra_regs */
1288 if (x86_pmu.extra_regs) {
1289 cpuc->shared_regs = allocate_shared_regs(cpu);
1290 if (!cpuc->shared_regs)
1291 goto error;
1292 }
1293 return cpuc;
1294error:
1295 free_fake_cpuc(cpuc);
1296 return ERR_PTR(-ENOMEM);
1297}
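/*
 * The fake cpuc is only ever fed to the constraint/scheduling code below
 * for a dry run; it is never installed as real per-cpu state and must be
 * released again with free_fake_cpuc().
 */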
Lin Ming4d1c52b2010-04-23 13:56:12 +08001298
Stephane Eranian1da53e02010-01-18 10:58:01 +02001299/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001300 * validate that we can schedule this event
1301 */
1302static int validate_event(struct perf_event *event)
1303{
1304 struct cpu_hw_events *fake_cpuc;
1305 struct event_constraint *c;
1306 int ret = 0;
1307
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001308 fake_cpuc = allocate_fake_cpuc();
1309 if (IS_ERR(fake_cpuc))
1310 return PTR_ERR(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001311
1312 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1313
1314 if (!c || !c->weight)
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01001315 ret = -EINVAL;
Peter Zijlstraca037702010-03-02 19:52:12 +01001316
1317 if (x86_pmu.put_event_constraints)
1318 x86_pmu.put_event_constraints(fake_cpuc, event);
1319
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001320 free_fake_cpuc(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001321
1322 return ret;
1323}
1324
1325/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001326 * validate a single event group
1327 *
1328 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001329 * - check events are compatible with each other
1330 * - events do not compete for the same counter
1331 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001332 *
1333 * validation ensures the group can be loaded onto the
1334 * PMU if it was the only group available.
1335 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001336static int validate_group(struct perf_event *event)
1337{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001338 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001339 struct cpu_hw_events *fake_cpuc;
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01001340 int ret = -EINVAL, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001341
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001342 fake_cpuc = allocate_fake_cpuc();
1343 if (IS_ERR(fake_cpuc))
1344 return PTR_ERR(fake_cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001345 /*
1346 * the event is not yet connected with its
1347 * siblings; therefore we must first collect
1348 * existing siblings, then add the new event
1349 * before we can simulate the scheduling
1350 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001351 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001352 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001353 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001354
Peter Zijlstra502568d2010-01-22 14:35:46 +01001355 fake_cpuc->n_events = n;
1356 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001357 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001358 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001359
Peter Zijlstra502568d2010-01-22 14:35:46 +01001360 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001361
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001362 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001363
Peter Zijlstra502568d2010-01-22 14:35:46 +01001364out:
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001365 free_fake_cpuc(fake_cpuc);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001366 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001367}
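/*
 * Example of what the dry run above catches: a group with more hardware
 * events than there are counters able to hold them, or two events whose
 * constraints allow only the same single counter, can never be scheduled
 * as a unit, so the group is rejected with -EINVAL when it is created
 * instead of ending up as a group that never goes onto the PMU.
 */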
1368
Yinghai Ludda99112011-01-21 15:30:01 -08001369static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001370{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001371 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001372 int err;
1373
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001374 switch (event->attr.type) {
1375 case PERF_TYPE_RAW:
1376 case PERF_TYPE_HARDWARE:
1377 case PERF_TYPE_HW_CACHE:
1378 break;
1379
1380 default:
1381 return -ENOENT;
1382 }
1383
1384 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001385 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001386 /*
1387 * we temporarily connect the event to its pmu
1388 * such that validate_group() can classify
1389 * it as an x86 event using is_x86_event()
1390 */
1391 tmp = event->pmu;
1392 event->pmu = &pmu;
1393
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001394 if (event->group_leader != event)
1395 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001396 else
1397 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001398
1399 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001400 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001401 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001402 if (event->destroy)
1403 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001404 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001405
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001406 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001407}
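/*
 * Returning -ENOENT above is not an error as such: it tells the generic
 * core that this attr.type is not handled by the cpu PMU, so it keeps
 * looking for another pmu (software events, tracepoints, breakpoints, ...).
 */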
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001408
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001409static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001410 .pmu_enable = x86_pmu_enable,
1411 .pmu_disable = x86_pmu_disable,
1412
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001413 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001414
1415 .add = x86_pmu_add,
1416 .del = x86_pmu_del,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001417 .start = x86_pmu_start,
1418 .stop = x86_pmu_stop,
1419 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001420
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001421 .start_txn = x86_pmu_start_txn,
1422 .cancel_txn = x86_pmu_cancel_txn,
1423 .commit_txn = x86_pmu_commit_txn,
1424};
1425
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001426/*
1427 * callchain support
1428 */
1429
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001430static int backtrace_stack(void *data, char *name)
1431{
Ingo Molnar038e8362009-06-15 09:57:59 +02001432 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001433}
1434
1435static void backtrace_address(void *data, unsigned long addr, int reliable)
1436{
1437 struct perf_callchain_entry *entry = data;
1438
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001439 perf_callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001440}
1441
1442static const struct stacktrace_ops backtrace_ops = {
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001443 .stack = backtrace_stack,
1444 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001445 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001446};
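/*
 * dump_trace() follows the frame-pointer chain (print_context_stack_bp)
 * and invokes ->address() for every return address it finds, which
 * backtrace_address() stores into the sampled callchain; backtrace_stack()
 * returning 0 simply lets the walk continue when it crosses onto another
 * stack (e.g. the irq stack).
 */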
1447
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02001448void
1449perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001450{
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001451 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1452 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001453 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001454 }
1455
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001456 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001457
Namhyung Kime8e999cf2011-03-18 11:40:06 +09001458 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001459}
1460
Torok Edwin257ef9d2010-03-17 12:07:16 +02001461#ifdef CONFIG_COMPAT
1462static inline int
1463perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001464{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001465 /* 32-bit process in 64-bit kernel. */
1466 struct stack_frame_ia32 frame;
1467 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001468
Torok Edwin257ef9d2010-03-17 12:07:16 +02001469 if (!test_thread_flag(TIF_IA32))
1470 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001471
Torok Edwin257ef9d2010-03-17 12:07:16 +02001472 fp = compat_ptr(regs->bp);
1473 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1474 unsigned long bytes;
1475 frame.next_frame = 0;
1476 frame.return_address = 0;
1477
1478 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1479 if (bytes != sizeof(frame))
1480 break;
1481
1482 if (fp < compat_ptr(regs->sp))
1483 break;
1484
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001485 perf_callchain_store(entry, frame.return_address);
Torok Edwin257ef9d2010-03-17 12:07:16 +02001486 fp = compat_ptr(frame.next_frame);
1487 }
1488 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001489}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001490#else
1491static inline int
1492perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1493{
1494 return 0;
1495}
1496#endif
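/*
 * Both the compat walker above and perf_callchain_user() below do a plain
 * frame-pointer unwind of the user stack: starting at regs->bp they copy
 * one struct stack_frame (next_frame, return_address) at a time with
 * copy_from_user_nmi() (we may well be in NMI context), record the return
 * address, and stop on a failed copy, a frame pointer that moves below
 * regs->sp, or PERF_MAX_STACK_DEPTH entries. This only works for user
 * code built with frame pointers.
 */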
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001497
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02001498void
1499perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001500{
1501 struct stack_frame frame;
1502 const void __user *fp;
1503
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001504 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1505 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001506 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001507 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001508
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001509 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001510
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001511 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001512
Andrey Vagin20afc602011-08-30 12:32:36 +04001513 if (!current->mm)
1514 return;
1515
Torok Edwin257ef9d2010-03-17 12:07:16 +02001516 if (perf_callchain_user32(regs, entry))
1517 return;
1518
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001519 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001520 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001521 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001522 frame.return_address = 0;
1523
Torok Edwin257ef9d2010-03-17 12:07:16 +02001524 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1525 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001526 break;
1527
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001528 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001529 break;
1530
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001531 perf_callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001532 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001533 }
1534}
1535
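/*
 * If the PMI interrupted a guest, the pt_regs we get describe the host
 * (the point where the hypervisor was interrupted), so ask the registered
 * guest callbacks for the guest RIP instead; otherwise report the
 * interrupted instruction pointer from the regs themselves.
 */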
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001536unsigned long perf_instruction_pointer(struct pt_regs *regs)
1537{
1538 unsigned long ip;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001539
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001540 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1541 ip = perf_guest_cbs->get_guest_ip();
1542 else
1543 ip = instruction_pointer(regs);
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001544
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001545 return ip;
1546}
1547
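/*
 * Classify where the sample came from (guest vs. host, user vs. kernel)
 * for the sample's misc field, and mark samples whose IP is known to be
 * exact: PERF_EFLAGS_EXACT is set by the precise-sampling (PEBS) code
 * when the reported IP really is the sampled instruction rather than a
 * skidded one.
 */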
1548unsigned long perf_misc_flags(struct pt_regs *regs)
1549{
1550 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001551
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001552 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001553 if (perf_guest_cbs->is_user_mode())
1554 misc |= PERF_RECORD_MISC_GUEST_USER;
1555 else
1556 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1557 } else {
1558 if (user_mode(regs))
1559 misc |= PERF_RECORD_MISC_USER;
1560 else
1561 misc |= PERF_RECORD_MISC_KERNEL;
1562 }
1563
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001564 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02001565 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001566
1567 return misc;
1568}