/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
        trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
                        (unsigned long)(val)); \
        native_write_msr((msr), (u32)((u64)(val)), \
                        (u32)((u64)(val) >> 32)); \
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1,   /* not used */

        EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */

        EXTRA_REG_MAX           /* number of entries needed */
};

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page);
                memcpy(to, map+offset, size);
                kunmap_atomic(map);
                put_page(page);

                len  += size;
                to   += size;
                addr += size;

        } while (len < n);

        return len;
}

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
};

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct intel_percore;

#define MAX_LBR_ENTRIES         16

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events;
        int                     n_added;
        int                     n_txn;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */

        unsigned int            group_flag;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        u64                     pebs_enabled;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        void                            *lbr_context;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;

        /*
         * AMD specific bits
         */
        struct amd_nb           *amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END            \
        EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight; (e)++)
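
/*
 * Illustration (names and values below are hypothetical, not taken from
 * any particular model's table): per-model constraint tables are built
 * from the macros above and terminated with EVENT_CONSTRAINT_END, e.g.
 *
 *	static struct event_constraint foo_event_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0xc0, 0x3),	// event 0xc0, counters 0-1 only
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	// INST_RETIRED.ANY on fixed counter 0
 *		EVENT_CONSTRAINT_END
 *	};
 *
 * The second argument is the bitmask of counters the event may use;
 * for_each_event_constraint() walks such a table until it hits the
 * all-zero terminator (weight == 0).
 */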

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t          lock;   /* per-core: protect structure */
        u64                     config; /* extra MSR config */
        u64                     reg;    /* extra MSR number */
        atomic_t                ref;    /* reference count */
};

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),           \
        .msr = (ms),            \
        .config_mask = (m),     \
        .valid_mask = (vm),     \
        .idx = EXTRA_REG_##i    \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
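
/*
 * Illustration (values are representative, not authoritative): the Intel
 * offcore response events are typically described by a table such as
 *
 *	static struct extra_reg foo_extra_regs[] = {
 *		INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 *
 * i.e. raw event 0xb7 carries its (large) response mask in attr->config1,
 * which gets written to the extra MSR when the event is programmed.
 */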

union perf_capabilities {
        struct {
                u64     lbr_format    : 6;
                u64     pebs_trap     : 1;
                u64     pebs_arch_reg : 1;
                u64     pebs_format   : 4;
                u64     smm_freeze    : 1;
        };
        u64     capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*hw_watchdog_set_attr)(struct perf_event_attr *attr);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
        void            (*quirks)(void);
        int             perfctr_second_write;

        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        int             bts, pebs;
        int             bts_active, pebs_active;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

void hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
        if (x86_pmu.hw_watchdog_set_attr)
                x86_pmu.hw_watchdog_set_attr(wd_attr);
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(hwc->event_base, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
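
/*
 * Worked example of the shift trick above: with 48 bit counters,
 * shift = 64 - 48 = 16. Shifting both raw values up by 16 before the
 * subtraction and the result back down by 16 keeps the arithmetic
 * correct when the narrow counter wrapped, e.g. prev = 0xffffffffffff
 * and new = 0x5 yields delta = 6 instead of a bogus huge value.
 */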

static inline int x86_pmu_addr_offset(int index)
{
        int offset;

        /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
        alternative_io(ASM_NOP2,
                       "shll $1, %%eax",
                       X86_FEATURE_PERFCTR_CORE,
                       "=a" (offset),
                       "a" (index));

        return offset;
}
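
/*
 * Rough illustration of the alternative above: on CPUs with
 * X86_FEATURE_PERFCTR_CORE (e.g. AMD family 15h) the event-select and
 * counter MSRs are interleaved in pairs, so counter i lives at
 * base + 2*i; everywhere else the MSRs are contiguous and the offset is
 * simply i. Patching a NOP vs. a shll avoids a conditional on this path.
 */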

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;

        reg = &event->hw.extra_reg;

        if (!x86_pmu.extra_regs)
                return 0;

        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;

                reg->idx = er->idx;
                reg->config = event->attr.config1;
                reg->reg = er->msr;
                break;
        }
        return 0;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));
        }
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
        u64 val, val_new = 0;
        int i, reg, ret = 0;

        /*
         * Check to see if the BIOS enabled any of the counters, if so
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        goto bios_fail;
        }

        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4))
                                goto bios_fail;
                }
        }

        /*
         * Now write a value and read it back to see if it matches,
         * this is needed to detect certain hardware emulators (qemu/kvm)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
        ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;

        return true;

bios_fail:
        /*
         * We still allow the PMU driver to operate:
         */
        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
        printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);

        return true;

msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");

        return false;
}

static void reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
}
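
/*
 * The cache event config handled above is packed as
 * (type | op << 8 | result << 16). For example, attr->config == 0x10000
 * selects PERF_COUNT_HW_CACHE_L1D / OP_READ / RESULT_MISS, which
 * hw_cache_event_ids[] either maps to a model specific raw event or
 * rejects with -ENOENT/-EINVAL.
 */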

static int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via a hrtimer based software event):
                 */
                if (!x86_pmu.apic)
                        return -EOPNOTSUPP;
        }

        /*
         * Do not allow config1 (extended registers) to propagate,
         * there's no sane user-space generalization yet:
         */
        if (attr->type == PERF_TYPE_RAW)
                return 0;

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !attr->freq && hwc->sample_period == 1) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }

        hwc->config |= config;

        return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
        if (event->attr.precise_ip) {
                int precise = 0;

                /* Support for constant skid */
                if (x86_pmu.pebs_active) {
                        precise++;

                        /* Support for IP fixup */
                        if (x86_pmu.lbr_nr)
                                precise++;
                }

                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
        }

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to
         */
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

        return x86_setup_perfctr(event);
}
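
/*
 * precise_ip levels accepted above: 1 needs PEBS (constant skid), 2
 * additionally needs the LBR based IP fixup so the sampled IP can be
 * walked back to the instruction that caused the event. Requests for
 * more precision than the hardware offers are rejected.
 */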

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;

        return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
        }
}

static void x86_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, hwc->config | enable_mask);
}

static void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, j, w, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                constraints[i] = c;
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = constraints[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        if (i == n)
                goto done;

        /*
         * begin slow path
         */

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        /*
         * weight = number of possible counters
         *
         * 1    = most constrained, only works on one counter
         * wmax = least constrained, works on any counter
         *
         * assign events to counters starting with most
         * constrained events.
         */
        wmax = x86_pmu.num_counters;

        /*
         * when fixed event counters are present,
         * wmax is incremented by 1 to account
         * for one more choice
         */
        if (x86_pmu.num_counters_fixed)
                wmax++;

        for (w = 1, num = n; num && w <= wmax; w++) {
                /* for each event */
                for (i = 0; num && i < n; i++) {
                        c = constraints[i];
                        hwc = &cpuc->event_list[i]->hw;

                        if (c->weight != w)
                                continue;

                        for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_bit(j, used_mask))
                                        break;
                        }

                        if (j == X86_PMC_IDX_MAX)
                                break;

                        __set_bit(j, used_mask);

                        if (assign)
                                assign[i] = j;
                        num--;
                }
        }
done:
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
        return num ? -ENOSPC : 0;
}
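
/*
 * Scheduling sketch: if every event can keep the counter it already had,
 * the fastpath above succeeds without touching anything. Otherwise
 * counters are (re)assigned in order of increasing constraint weight,
 * i.e. the events that fit the fewest counters get first pick; this is a
 * simple greedy heuristic rather than a perfect matching.
 */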

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -ENOSPC;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -ENOSPC;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
        }
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        /*
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                         */
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;

                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        if (hwc->state & PERF_HES_ARCH)
                                continue;

                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

        /*
         * Due to an erratum on certain CPUs we need a second write to be
         * sure the register is updated properly:
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}
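
/*
 * Example of the (-left) programming above: to take an interrupt after
 * another 100000 events on a 48 bit counter, the counter is written with
 * (0x1000000000000 - 100000), i.e. (u64)(-left) masked to cntval_mask,
 * so it overflows exactly when the remaining period has elapsed.
 */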

static void x86_pmu_enable_event(struct perf_event *event)
{
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        if (ret < 0)
                goto out;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto done_collect;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                goto out;
        /*
         * copy new assignment, now we know it is possible;
         * it will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        ret = 0;
out:
        perf_pmu_enable(event->pmu);
        return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1298 /*
1299 * Drain the remaining delta count out of an event
1300 * that we are disabling:
1301 */
1302 x86_perf_event_update(event);
1303 hwc->state |= PERF_HES_UPTODATE;
1304 }
Peter Zijlstra2e841872010-01-25 15:58:43 +01001305}
1306
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001307static void x86_pmu_del(struct perf_event *event, int flags)
Peter Zijlstra2e841872010-01-25 15:58:43 +01001308{
1309 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1310 int i;
1311
Stephane Eranian90151c352010-05-25 16:23:10 +02001312 /*
1313 * If we're called during a txn, we don't need to do anything.
1314 * The events never got scheduled and ->cancel_txn will truncate
1315 * the event_list.
1316 */
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001317 if (cpuc->group_flag & PERF_EVENT_TXN)
Stephane Eranian90151c352010-05-25 16:23:10 +02001318 return;
1319
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001320 x86_pmu_stop(event, PERF_EF_UPDATE);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001321
Stephane Eranian1da53e02010-01-18 10:58:01 +02001322 for (i = 0; i < cpuc->n_events; i++) {
1323 if (event == cpuc->event_list[i]) {
1324
1325 if (x86_pmu.put_event_constraints)
1326 x86_pmu.put_event_constraints(cpuc, event);
1327
1328 while (++i < cpuc->n_events)
1329 cpuc->event_list[i-1] = cpuc->event_list[i];
1330
1331 --cpuc->n_events;
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001332 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001333 }
1334 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001335 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001336}
1337
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001338static int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001339{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001340 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001341 struct cpu_hw_events *cpuc;
1342 struct perf_event *event;
Vince Weaver11d15782009-07-08 17:46:14 -04001343 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001344 u64 val;
1345
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001346 perf_sample_data_init(&data, 0);
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001347
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001348 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001349
Don Zickus2bce5da2011-04-27 06:32:33 -04001350 /*
1351 * Some chipsets need to unmask the LVTPC in a particular spot
1352 * inside the nmi handler. As a result, the unmasking was pushed
1353 * into all the nmi handlers.
1354 *
1355 * This generic handler doesn't seem to have any issues with where the
1356 * unmasking occurs, so it was left at the top.
1357 */
1358 apic_write(APIC_LVTPC, APIC_DM_NMI);
1359
Robert Richter948b1bb2010-03-29 18:36:50 +02001360 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter63e6be62010-09-15 18:20:34 +02001361 if (!test_bit(idx, cpuc->active_mask)) {
1362 /*
1363 * Though we deactivated the counter, some CPUs
1364 * might still deliver spurious interrupts that are
1365 * still in flight. Catch them:
1366 */
1367 if (__test_and_clear_bit(idx, cpuc->running))
1368 handled++;
Robert Richtera29aa8a2009-04-29 12:47:21 +02001369 continue;
Robert Richter63e6be62010-09-15 18:20:34 +02001370 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001371
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001372 event = cpuc->events[idx];
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001373
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001374 val = x86_perf_event_update(event);
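		/*
		 * The counter was programmed with -(left period) truncated to
		 * cntval_bits, so its top bit stays set until it wraps past
		 * zero. Worked example (assuming 48-bit counters and a period
		 * of 100000): the counter starts at 0xfffffffe7960 with bit 47
		 * set; only once it overflows does the value drop to a small
		 * number with bit 47 clear. A value whose top bit is still set
		 * therefore means this counter did not overflow and did not
		 * raise the PMI, so it is skipped.
		 */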
Robert Richter948b1bb2010-03-29 18:36:50 +02001375 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001376 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001377
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001378 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001379 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001380 */
Robert Richter4177c422010-09-02 15:07:48 -04001381 handled++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001382 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001383
Peter Zijlstra07088ed2010-03-02 20:16:01 +01001384 if (!x86_perf_event_set_period(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001385 continue;
1386
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001387 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001388 x86_pmu_stop(event, 0);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001389 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001390
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001391 if (handled)
1392 inc_irq_stat(apic_perf_irqs);
1393
Robert Richtera29aa8a2009-04-29 12:47:21 +02001394 return handled;
1395}
Robert Richter39d81ea2009-04-29 12:47:05 +02001396
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001397void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001398{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001399 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001400 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001401
Ingo Molnar241771e2008-12-03 10:39:53 +01001402 /*
Yong Wangc323d952009-05-29 13:28:35 +08001403 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001404 */
Yong Wangc323d952009-05-29 13:28:35 +08001405 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar241771e2008-12-03 10:39:53 +01001406}
1407
Robert Richter4177c422010-09-02 15:07:48 -04001408struct pmu_nmi_state {
1409 unsigned int marked;
1410 int handled;
1411};
1412
1413static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1414
Ingo Molnar241771e2008-12-03 10:39:53 +01001415static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001416perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001417 unsigned long cmd, void *__args)
1418{
1419 struct die_args *args = __args;
Robert Richter4177c422010-09-02 15:07:48 -04001420 unsigned int this_nmi;
1421 int handled;
Ingo Molnar241771e2008-12-03 10:39:53 +01001422
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001423 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001424 return NOTIFY_DONE;
1425
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001426 switch (cmd) {
1427 case DIE_NMI:
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001428 break;
Robert Richter4177c422010-09-02 15:07:48 -04001429 case DIE_NMIUNKNOWN:
1430 this_nmi = percpu_read(irq_stat.__nmi_count);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001431 if (this_nmi != __this_cpu_read(pmu_nmi.marked))
Robert Richter4177c422010-09-02 15:07:48 -04001432 /* let the kernel handle the unknown nmi */
1433 return NOTIFY_DONE;
1434 /*
1435 * This one is a PMU back-to-back nmi. Two events
1436 * trigger 'simultaneously' raising two back-to-back
1437 * NMIs. If the first NMI handles both, the latter
1438 * will be empty and daze the CPU. So, we drop it to
1439 * avoid false-positive 'unknown nmi' messages.
1440 */
1441 return NOTIFY_STOP;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001442 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001443 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001444 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001445
Robert Richter4177c422010-09-02 15:07:48 -04001446 handled = x86_pmu.handle_irq(args->regs);
1447 if (!handled)
1448 return NOTIFY_DONE;
1449
1450 this_nmi = percpu_read(irq_stat.__nmi_count);
1451 if ((handled > 1) ||
1452 /* the next nmi could be a back-to-back nmi */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001453 ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1454 (__this_cpu_read(pmu_nmi.handled) > 1))) {
Robert Richter4177c422010-09-02 15:07:48 -04001455 /*
1456 * We could have two subsequent back-to-back nmis: The
1457 * first handles more than one counter, the 2nd
1458 * handles only one counter and the 3rd handles no
1459 * counter.
1460 *
1461 * This is the 2nd nmi because the previous was
1462 * handling more than one counter. We will mark the
1463 * next (3rd) and then drop it if unhandled.
1464 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001465 __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1466 __this_cpu_write(pmu_nmi.handled, handled);
Robert Richter4177c422010-09-02 15:07:48 -04001467 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001468
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001469 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001470}
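/*
 * Worked timeline for the back-to-back NMI bookkeeping above (an assumed
 * illustration; the exact counts depend on how the overflows line up):
 *
 *	NMI #N   (DIE_NMI):        handle_irq() finds two overflowed counters,
 *	                           handled == 2, so #N+1 is marked as a
 *	                           potential back-to-back NMI.
 *	NMI #N+1 (DIE_NMI):        raised by the second overflow; if it still
 *	                           handles one counter, the mark is pushed on
 *	                           to #N+2.
 *	NMI #N+2 (DIE_NMIUNKNOWN): nothing left to handle, but its sequence
 *	                           number matches pmu_nmi.marked, so it is
 *	                           swallowed instead of producing an
 *	                           'unknown NMI' warning.
 */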
1471
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001472static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1473 .notifier_call = perf_event_nmi_handler,
1474 .next = NULL,
Don Zickus166d7512011-01-06 16:18:49 -05001475 .priority = NMI_LOCAL_LOW_PRIOR,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001476};
1477
Peter Zijlstra63b14642010-01-22 16:32:17 +01001478static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001479static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001480
Peter Zijlstra63b14642010-01-22 16:32:17 +01001481static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001482x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001483{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001484 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001485
Stephane Eranian1da53e02010-01-18 10:58:01 +02001486 if (x86_pmu.event_constraints) {
1487 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001488 if ((event->hw.config & c->cmask) == c->code)
1489 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001490 }
1491 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001492
1493 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001494}
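/*
 * Illustrative shape of a constraint table consumed by the loop above
 * (hypothetical entries; the real tables live in the vendor files included
 * below, e.g. perf_event_intel.c):
 *
 *	static struct event_constraint sketch_event_constraints[] =
 *	{
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	// INST_RETIRED.ANY
 *		INTEL_EVENT_CONSTRAINT(0x12, 0x3),	// counters 0-1 only
 *		EVENT_CONSTRAINT_END
 *	};
 *
 * An event matches an entry when (hw.config & c->cmask) == c->code; the
 * first match wins, and an event with no match is "unconstrained", i.e.
 * free to use any generic counter.
 */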
1495
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001496#include "perf_event_amd.c"
1497#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001498#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001499#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001500#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001501#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301502
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001503static int __cpuinit
1504x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1505{
1506 unsigned int cpu = (long)hcpu;
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001507 int ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001508
1509 switch (action & ~CPU_TASKS_FROZEN) {
1510 case CPU_UP_PREPARE:
1511 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001512 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001513 break;
1514
1515 case CPU_STARTING:
1516 if (x86_pmu.cpu_starting)
1517 x86_pmu.cpu_starting(cpu);
1518 break;
1519
1520 case CPU_DYING:
1521 if (x86_pmu.cpu_dying)
1522 x86_pmu.cpu_dying(cpu);
1523 break;
1524
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001525 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001526 case CPU_DEAD:
1527 if (x86_pmu.cpu_dead)
1528 x86_pmu.cpu_dead(cpu);
1529 break;
1530
1531 default:
1532 break;
1533 }
1534
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001535 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001536}
1537
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001538static void __init pmu_check_apic(void)
1539{
1540 if (cpu_has_apic)
1541 return;
1542
1543 x86_pmu.apic = 0;
1544 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1545 pr_info("no hardware sampling interrupt available.\n");
1546}
1547
Yinghai Ludda99112011-01-21 15:30:01 -08001548static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301549{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001550 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001551 int err;
1552
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001553 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001554
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301555 switch (boot_cpu_data.x86_vendor) {
1556 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001557 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301558 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301559 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001560 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301561 break;
Robert Richter41389602009-04-29 12:47:00 +02001562 default:
Peter Zijlstra004417a2010-11-25 18:38:29 +01001563 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301564 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001565 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001566 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001567 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001568 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301569
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001570 pmu_check_apic();
1571
Don Zickus33c6d6a2010-11-22 16:55:23 -05001572 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001573 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001574 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001575
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001576 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001577
Peter Zijlstra3c447802010-03-04 21:49:01 +01001578 if (x86_pmu.quirks)
1579 x86_pmu.quirks();
1580
Robert Richter948b1bb2010-03-29 18:36:50 +02001581 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001582 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001583 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1584 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001585 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001586 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar241771e2008-12-03 10:39:53 +01001587
Robert Richter948b1bb2010-03-29 18:36:50 +02001588 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001589 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001590 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1591 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001592 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001593
Robert Richterd6dc0b42010-03-17 12:49:13 +01001594 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001595 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
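	/*
	 * Worked example (illustrative): with 4 generic and 3 fixed counters
	 * and X86_PMC_IDX_FIXED == 32, intel_ctrl ends up as
	 * ((1 << 4) - 1) | (((1LL << 3) - 1) << 32) == 0x70000000f,
	 * i.e. one enable bit per usable counter in the global control MSR.
	 */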
Ingo Molnar862a1a52008-12-17 13:09:20 +01001596
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597 perf_events_lapic_init();
1598 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001599
Peter Zijlstra63b14642010-01-22 16:32:17 +01001600 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001601 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1602 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001603
Peter Zijlstrab622d642010-02-01 15:36:30 +01001604 if (x86_pmu.event_constraints) {
1605 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001606 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001607 continue;
1608
Robert Richter948b1bb2010-03-29 18:36:50 +02001609 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1610 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001611 }
1612 }
1613
Ingo Molnar57c0c152009-09-21 12:20:38 +02001614 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001615 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1616 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1617 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001618 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001619 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001620 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001621
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001622 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001623 perf_cpu_notifier(x86_pmu_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001624
1625 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001626}
Peter Zijlstra004417a2010-11-25 18:38:29 +01001627early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001628
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001629static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001630{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001631 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001632}
1633
Lin Ming4d1c52b2010-04-23 13:56:12 +08001634/*
1635 * Start group events scheduling transaction
1636 * Set the flag to make pmu::add() not perform the
1637 * schedulability test; it will be performed at commit time
1638 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001639static void x86_pmu_start_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001640{
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001641 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001642 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1643 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001644}
1645
1646/*
1647 * Cancel group events scheduling transaction
1648 * Clear the flag and pmu::add() will perform the
1649 * schedulability test.
1650 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001651static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001652{
Tejun Heo0a3aee02010-12-18 16:28:55 +01001653 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
Stephane Eranian90151c352010-05-25 16:23:10 +02001654 /*
1655 * Truncate the collected events.
1656 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001657 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1658 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001659 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001660}
1661
1662/*
1663 * Commit group events scheduling transaction
1664 * Perform the group schedulability test as a whole
1665 * Return 0 if success
1666 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001667static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001668{
1669 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1670 int assign[X86_PMC_IDX_MAX];
1671 int n, ret;
1672
1673 n = cpuc->n_events;
1674
1675 if (!x86_pmu_initialized())
1676 return -EAGAIN;
1677
1678 ret = x86_pmu.schedule_events(cpuc, n, assign);
1679 if (ret)
1680 return ret;
1681
1682 /*
1683 * copy the new assignment now that we know it is possible;
1684 * it will be used by hw_perf_enable()
1685 */
1686 memcpy(cpuc->assign, assign, n*sizeof(int));
1687
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001688 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001689 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001690 return 0;
1691}
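/*
 * Sketch of the assumed caller-side sequence for the three transaction
 * hooks above (a simplification of what the generic group scheduling code
 * is expected to do, not a code path defined in this file):
 *
 *	pmu->start_txn(pmu);		// disable the PMU, set PERF_EVENT_TXN
 *
 *	for each event in the group:
 *		pmu->add(event, ...);	// collect only, scheduling deferred
 *
 *	if (pmu->commit_txn(pmu))	// one schedulability test for the
 *		pmu->cancel_txn(pmu);	// whole group; undo the collected
 *					// events if it fails
 */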
1692
Stephane Eranian1da53e02010-01-18 10:58:01 +02001693/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001694 * validate that we can schedule this event
1695 */
1696static int validate_event(struct perf_event *event)
1697{
1698 struct cpu_hw_events *fake_cpuc;
1699 struct event_constraint *c;
1700 int ret = 0;
1701
1702 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1703 if (!fake_cpuc)
1704 return -ENOMEM;
1705
1706 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1707
1708 if (!c || !c->weight)
1709 ret = -ENOSPC;
1710
1711 if (x86_pmu.put_event_constraints)
1712 x86_pmu.put_event_constraints(fake_cpuc, event);
1713
1714 kfree(fake_cpuc);
1715
1716 return ret;
1717}
1718
1719/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001720 * validate a single event group
1721 *
1722 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001723 * - check events are compatible with each other
1724 * - events do not compete for the same counter
1725 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001726 *
1727 * validation ensures the group can be loaded onto the
1728 * PMU if it was the only group available.
1729 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001730static int validate_group(struct perf_event *event)
1731{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001732 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001733 struct cpu_hw_events *fake_cpuc;
1734 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001735
Peter Zijlstra502568d2010-01-22 14:35:46 +01001736 ret = -ENOMEM;
1737 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1738 if (!fake_cpuc)
1739 goto out;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001740 /*
1741 * the event is not yet connected with its
1742 * siblings; therefore we must first collect
1743 * existing siblings, then add the new event
1744 * before we can simulate the scheduling
1745 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001746 ret = -ENOSPC;
1747 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001748 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001749 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001750
Peter Zijlstra502568d2010-01-22 14:35:46 +01001751 fake_cpuc->n_events = n;
1752 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001753 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001754 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001755
Peter Zijlstra502568d2010-01-22 14:35:46 +01001756 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001757
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001758 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001759
1760out_free:
1761 kfree(fake_cpuc);
1762out:
1763 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001764}
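/*
 * Example of what the simulated scheduling above catches (hypothetical
 * user-space usage; the exact breaking point depends on the CPU's counters
 * and constraints): once a group asks for more counters than the PMU could
 * ever provide at once, the perf_event_open() for the offending sibling
 * fails up front (with -ENOSPC in this version) instead of silently never
 * counting:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	int leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	int sib    = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0);
 *	// ...more siblings: the one that overflows the PMU gets an error
 */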
1765
Yinghai Ludda99112011-01-21 15:30:01 -08001766static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001767{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001768 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001769 int err;
1770
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001771 switch (event->attr.type) {
1772 case PERF_TYPE_RAW:
1773 case PERF_TYPE_HARDWARE:
1774 case PERF_TYPE_HW_CACHE:
1775 break;
1776
1777 default:
1778 return -ENOENT;
1779 }
1780
1781 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001782 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001783 /*
1784 * we temporarily connect the event to its pmu
1785 * such that validate_group() can classify
1786 * it as an x86 event using is_x86_event()
1787 */
1788 tmp = event->pmu;
1789 event->pmu = &pmu;
1790
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001791 if (event->group_leader != event)
1792 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001793 else
1794 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001795
1796 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001797 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001798 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001799 if (event->destroy)
1800 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001801 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001802
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001803 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001804}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001805
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001806static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001807 .pmu_enable = x86_pmu_enable,
1808 .pmu_disable = x86_pmu_disable,
1809
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001810 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001811
1812 .add = x86_pmu_add,
1813 .del = x86_pmu_del,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001814 .start = x86_pmu_start,
1815 .stop = x86_pmu_stop,
1816 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001817
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001818 .start_txn = x86_pmu_start_txn,
1819 .cancel_txn = x86_pmu_cancel_txn,
1820 .commit_txn = x86_pmu_commit_txn,
1821};
1822
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001823/*
1824 * callchain support
1825 */
1826
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001827static int backtrace_stack(void *data, char *name)
1828{
Ingo Molnar038e8362009-06-15 09:57:59 +02001829 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001830}
1831
1832static void backtrace_address(void *data, unsigned long addr, int reliable)
1833{
1834 struct perf_callchain_entry *entry = data;
1835
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001836 perf_callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001837}
1838
1839static const struct stacktrace_ops backtrace_ops = {
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001840 .stack = backtrace_stack,
1841 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001842 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001843};
1844
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02001845void
1846perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001847{
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001848 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1849 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001850 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001851 }
1852
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001853 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001854
Namhyung Kime8e999cf2011-03-18 11:40:06 +09001855 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001856}
1857
Torok Edwin257ef9d2010-03-17 12:07:16 +02001858#ifdef CONFIG_COMPAT
1859static inline int
1860perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001861{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001862 /* 32-bit process in 64-bit kernel. */
1863 struct stack_frame_ia32 frame;
1864 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001865
Torok Edwin257ef9d2010-03-17 12:07:16 +02001866 if (!test_thread_flag(TIF_IA32))
1867 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001868
Torok Edwin257ef9d2010-03-17 12:07:16 +02001869 fp = compat_ptr(regs->bp);
1870 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1871 unsigned long bytes;
1872 frame.next_frame = 0;
1873 frame.return_address = 0;
1874
1875 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1876 if (bytes != sizeof(frame))
1877 break;
1878
1879 if (fp < compat_ptr(regs->sp))
1880 break;
1881
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001882 perf_callchain_store(entry, frame.return_address);
Torok Edwin257ef9d2010-03-17 12:07:16 +02001883 fp = compat_ptr(frame.next_frame);
1884 }
1885 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001886}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001887#else
1888static inline int
1889perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1890{
1891 return 0;
1892}
1893#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001894
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02001895void
1896perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001897{
1898 struct stack_frame frame;
1899 const void __user *fp;
1900
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001901 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1902 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001903 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001904 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001905
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001906 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001907
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001908 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001909
Torok Edwin257ef9d2010-03-17 12:07:16 +02001910 if (perf_callchain_user32(regs, entry))
1911 return;
1912
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001913 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001914 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001915 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001916 frame.return_address = 0;
1917
Torok Edwin257ef9d2010-03-17 12:07:16 +02001918 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1919 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001920 break;
1921
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001922 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001923 break;
1924
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001925 perf_callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001926 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001927 }
1928}
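/*
 * Assumed user stack layout behind the frame-pointer walk above (a sketch
 * for the common frame-pointer-based case; leaf functions or code built
 * with -fomit-frame-pointer will cut the chain short):
 *
 *	...
 *	return address		<- fp + sizeof(long)	(frame.return_address)
 *	saved frame pointer	<- fp			(frame.next_frame)
 *	callee's locals
 *	...
 *
 * struct stack_frame mirrors these two words, so each iteration fetches a
 * whole frame with one copy_from_user_nmi() and then follows next_frame;
 * the walk stops on a failed copy, when fp drops below the user stack
 * pointer, or after PERF_MAX_STACK_DEPTH entries.
 */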
1929
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001930unsigned long perf_instruction_pointer(struct pt_regs *regs)
1931{
1932 unsigned long ip;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001933
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001934 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1935 ip = perf_guest_cbs->get_guest_ip();
1936 else
1937 ip = instruction_pointer(regs);
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001938
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001939 return ip;
1940}
1941
1942unsigned long perf_misc_flags(struct pt_regs *regs)
1943{
1944 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001945
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001946 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001947 if (perf_guest_cbs->is_user_mode())
1948 misc |= PERF_RECORD_MISC_GUEST_USER;
1949 else
1950 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1951 } else {
1952 if (user_mode(regs))
1953 misc |= PERF_RECORD_MISC_USER;
1954 else
1955 misc |= PERF_RECORD_MISC_KERNEL;
1956 }
1957
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001958 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02001959 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001960
1961 return misc;
1962}