blob: 921bbf732e77c9fb15e40a4139c5c99f6a11ce8e [file] [log] [blame]
Ingo Molnar241771e2008-12-03 10:39:53 +01001/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002 * Performance events x86 architecture code
Ingo Molnar241771e2008-12-03 10:39:53 +01003 *
Ingo Molnar98144512009-04-29 14:52:50 +02004 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
Markus Metzger30dd5682009-07-21 15:56:48 +02009 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
Stephane Eranian1da53e02010-01-18 10:58:01 +020010 * Copyright (C) 2009 Google, Inc., Stephane Eranian
Ingo Molnar241771e2008-12-03 10:39:53 +010011 *
12 * For licencing details see kernel-base/COPYING
13 */
14
Ingo Molnarcdd6c482009-09-21 12:02:48 +020015#include <linux/perf_event.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010016#include <linux/capability.h>
17#include <linux/notifier.h>
18#include <linux/hardirq.h>
19#include <linux/kprobes.h>
Thomas Gleixner4ac13292008-12-09 21:43:39 +010020#include <linux/module.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010021#include <linux/kdebug.h>
22#include <linux/sched.h>
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +020023#include <linux/uaccess.h>
Peter Zijlstra74193ef2009-06-15 13:07:24 +020024#include <linux/highmem.h>
Markus Metzger30dd5682009-07-21 15:56:48 +020025#include <linux/cpu.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010026
Ingo Molnar241771e2008-12-03 10:39:53 +010027#include <asm/apic.h>
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +020028#include <asm/stacktrace.h>
Peter Zijlstra4e935e42009-03-30 19:07:16 +020029#include <asm/nmi.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010030
Ingo Molnarcdd6c482009-09-21 12:02:48 +020031static u64 perf_event_mask __read_mostly;
Ingo Molnar703e9372008-12-17 10:51:15 +010032
Ingo Molnarcdd6c482009-09-21 12:02:48 +020033/* The maximal number of PEBS events: */
34#define MAX_PEBS_EVENTS 4
Markus Metzger30dd5682009-07-21 15:56:48 +020035
36/* The size of a BTS record in bytes: */
37#define BTS_RECORD_SIZE 24
38
39/* The size of a per-cpu BTS buffer in bytes: */
Markus Metzger5622f292009-09-15 13:00:23 +020040#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
Markus Metzger30dd5682009-07-21 15:56:48 +020041
42/* The BTS overflow threshold in bytes from the end of the buffer: */
Markus Metzger5622f292009-09-15 13:00:23 +020043#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
Markus Metzger30dd5682009-07-21 15:56:48 +020044
45
46/*
47 * Bits in the debugctlmsr controlling branch tracing.
48 */
49#define X86_DEBUGCTL_TR (1 << 6)
50#define X86_DEBUGCTL_BTS (1 << 7)
51#define X86_DEBUGCTL_BTINT (1 << 8)
52#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
53#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
54
55/*
56 * A debug store configuration.
57 *
58 * We only support architectures that use 64bit fields.
59 */
60struct debug_store {
61 u64 bts_buffer_base;
62 u64 bts_index;
63 u64 bts_absolute_maximum;
64 u64 bts_interrupt_threshold;
65 u64 pebs_buffer_base;
66 u64 pebs_index;
67 u64 pebs_absolute_maximum;
68 u64 pebs_interrupt_threshold;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020069 u64 pebs_event_reset[MAX_PEBS_EVENTS];
Markus Metzger30dd5682009-07-21 15:56:48 +020070};
71
Stephane Eranian1da53e02010-01-18 10:58:01 +020072struct event_constraint {
Peter Zijlstrac91e0f52010-01-22 15:25:59 +010073 union {
74 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 u64 idxmsk64[1];
76 };
Stephane Eranian1da53e02010-01-18 10:58:01 +020077 int code;
78 int cmask;
79};
80
Ingo Molnarcdd6c482009-09-21 12:02:48 +020081struct cpu_hw_events {
Stephane Eranian1da53e02010-01-18 10:58:01 +020082 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
Robert Richter43f62012009-04-29 16:55:56 +020083 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Mike Galbraith4b39fd92009-01-23 14:36:16 +010084 unsigned long interrupts;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +010085 int enabled;
Markus Metzger30dd5682009-07-21 15:56:48 +020086 struct debug_store *ds;
Stephane Eranian1da53e02010-01-18 10:58:01 +020087
88 int n_events;
89 int n_added;
90 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
91 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
Ingo Molnar241771e2008-12-03 10:39:53 +010092};
93
Peter Zijlstrac91e0f52010-01-22 15:25:59 +010094#define EVENT_CONSTRAINT(c, n, m) { \
95 { .idxmsk64[0] = (n) }, \
96 .code = (c), \
97 .cmask = (m), \
98}
Stephane Eranianb6900812009-10-06 16:42:09 +020099
Stephane Eranian1da53e02010-01-18 10:58:01 +0200100#define EVENT_CONSTRAINT_END \
Peter Zijlstrac91e0f52010-01-22 15:25:59 +0100101 EVENT_CONSTRAINT(0, 0, 0)
Stephane Eranianb6900812009-10-06 16:42:09 +0200102
103#define for_each_event_constraint(e, c) \
Stephane Eranian1da53e02010-01-18 10:58:01 +0200104 for ((e) = (c); (e)->cmask; (e)++)
Stephane Eranianb6900812009-10-06 16:42:09 +0200105
Ingo Molnar241771e2008-12-03 10:39:53 +0100106/*
Robert Richter5f4ec282009-04-29 12:47:04 +0200107 * struct x86_pmu - generic x86 pmu
Ingo Molnar241771e2008-12-03 10:39:53 +0100108 */
Robert Richter5f4ec282009-04-29 12:47:04 +0200109struct x86_pmu {
Robert Richterfaa28ae2009-04-29 12:47:13 +0200110 const char *name;
111 int version;
Yong Wanga3288102009-06-03 13:12:55 +0800112 int (*handle_irq)(struct pt_regs *);
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200113 void (*disable_all)(void);
114 void (*enable_all)(void);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200115 void (*enable)(struct hw_perf_event *, int);
116 void (*disable)(struct hw_perf_event *, int);
Jaswinder Singh Rajput169e41e2009-02-28 18:37:49 +0530117 unsigned eventsel;
118 unsigned perfctr;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100119 u64 (*event_map)(int);
120 u64 (*raw_event)(u64);
Jaswinder Singh Rajput169e41e2009-02-28 18:37:49 +0530121 int max_events;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200122 int num_events;
123 int num_events_fixed;
124 int event_bits;
125 u64 event_mask;
Ingo Molnar04da8a42009-08-11 10:40:08 +0200126 int apic;
Robert Richterc619b8f2009-04-29 12:47:23 +0200127 u64 max_period;
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200128 u64 intel_ctrl;
Markus Metzger30dd5682009-07-21 15:56:48 +0200129 void (*enable_bts)(u64 config);
130 void (*disable_bts)(void);
Peter Zijlstrac91e0f52010-01-22 15:25:59 +0100131 void (*get_event_constraints)(struct cpu_hw_events *cpuc,
132 struct perf_event *event,
133 unsigned long *idxmsk);
134 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
135 struct perf_event *event);
Stephane Eranian1da53e02010-01-18 10:58:01 +0200136 const struct event_constraint *event_constraints;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530137};
138
Robert Richter4a06bd82009-04-29 12:47:11 +0200139static struct x86_pmu x86_pmu __read_mostly;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530140
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200141static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100142 .enabled = 1,
143};
Ingo Molnar241771e2008-12-03 10:39:53 +0100144
Stephane Eranian1da53e02010-01-18 10:58:01 +0200145static int x86_perf_event_set_period(struct perf_event *event,
146 struct hw_perf_event *hwc, int idx);
Stephane Eranianb6900812009-10-06 16:42:09 +0200147
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530148/*
Vince Weaver11d15782009-07-08 17:46:14 -0400149 * Not sure about some of these
150 */
151static const u64 p6_perfmon_event_map[] =
152{
153 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
154 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
Ingo Molnarf64cccc2009-08-11 10:26:33 +0200155 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
156 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
Vince Weaver11d15782009-07-08 17:46:14 -0400157 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
158 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
159 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
160};
161
Ingo Molnardfc65092009-09-21 11:31:35 +0200162static u64 p6_pmu_event_map(int hw_event)
Vince Weaver11d15782009-07-08 17:46:14 -0400163{
Ingo Molnardfc65092009-09-21 11:31:35 +0200164 return p6_perfmon_event_map[hw_event];
Vince Weaver11d15782009-07-08 17:46:14 -0400165}
166
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200167/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200168 * Event setting that is specified not to count anything.
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200169 * We use this to effectively disable a counter.
170 *
171 * L2_RQSTS with 0 MESI unit mask.
172 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200173#define P6_NOP_EVENT 0x0000002EULL
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200174
Ingo Molnardfc65092009-09-21 11:31:35 +0200175static u64 p6_pmu_raw_event(u64 hw_event)
Vince Weaver11d15782009-07-08 17:46:14 -0400176{
177#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
178#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
179#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
180#define P6_EVNTSEL_INV_MASK 0x00800000ULL
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200181#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
Vince Weaver11d15782009-07-08 17:46:14 -0400182
183#define P6_EVNTSEL_MASK \
184 (P6_EVNTSEL_EVENT_MASK | \
185 P6_EVNTSEL_UNIT_MASK | \
186 P6_EVNTSEL_EDGE_MASK | \
187 P6_EVNTSEL_INV_MASK | \
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200188 P6_EVNTSEL_REG_MASK)
Vince Weaver11d15782009-07-08 17:46:14 -0400189
Ingo Molnardfc65092009-09-21 11:31:35 +0200190 return hw_event & P6_EVNTSEL_MASK;
Vince Weaver11d15782009-07-08 17:46:14 -0400191}
192
Stephane Eranian1da53e02010-01-18 10:58:01 +0200193static struct event_constraint intel_p6_event_constraints[] =
Stephane Eranianb6900812009-10-06 16:42:09 +0200194{
Stephane Eranian1da53e02010-01-18 10:58:01 +0200195 EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK), /* FLOPS */
196 EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
197 EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
198 EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
199 EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
200 EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
Stephane Eranianb6900812009-10-06 16:42:09 +0200201 EVENT_CONSTRAINT_END
202};
Vince Weaver11d15782009-07-08 17:46:14 -0400203
204/*
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530205 * Intel PerfMon v3. Used on Core2 and later.
206 */
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100207static const u64 intel_perfmon_event_map[] =
Ingo Molnar241771e2008-12-03 10:39:53 +0100208{
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +0200209 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
210 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
211 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
212 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
213 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
214 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
215 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
Ingo Molnar241771e2008-12-03 10:39:53 +0100216};
217
Stephane Eranian1da53e02010-01-18 10:58:01 +0200218static struct event_constraint intel_core_event_constraints[] =
Stephane Eranianb6900812009-10-06 16:42:09 +0200219{
Stephane Eranian1da53e02010-01-18 10:58:01 +0200220 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
221 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
222 EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
223 EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
224 EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
225 EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
226 EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
227 EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
228 EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
229 EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
230 EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
Stephane Eranianb6900812009-10-06 16:42:09 +0200231 EVENT_CONSTRAINT_END
232};
233
Stephane Eranian1da53e02010-01-18 10:58:01 +0200234static struct event_constraint intel_nehalem_event_constraints[] =
Stephane Eranianb6900812009-10-06 16:42:09 +0200235{
Stephane Eranian1da53e02010-01-18 10:58:01 +0200236 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
237 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
238 EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
239 EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
240 EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
241 EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
242 EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
243 EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
244 EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
245 EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
246 EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
247 EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
248 EVENT_CONSTRAINT_END
249};
250
251static struct event_constraint intel_gen_event_constraints[] =
252{
253 EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
254 EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
Stephane Eranianb6900812009-10-06 16:42:09 +0200255 EVENT_CONSTRAINT_END
256};
257
Ingo Molnardfc65092009-09-21 11:31:35 +0200258static u64 intel_pmu_event_map(int hw_event)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530259{
Ingo Molnardfc65092009-09-21 11:31:35 +0200260 return intel_perfmon_event_map[hw_event];
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530261}
Ingo Molnar241771e2008-12-03 10:39:53 +0100262
Ingo Molnar8326f442009-06-05 20:22:46 +0200263/*
Ingo Molnardfc65092009-09-21 11:31:35 +0200264 * Generalized hw caching related hw_event table, filled
Ingo Molnar8326f442009-06-05 20:22:46 +0200265 * in on a per model basis. A value of 0 means
Ingo Molnardfc65092009-09-21 11:31:35 +0200266 * 'not supported', -1 means 'hw_event makes no sense on
267 * this CPU', any other value means the raw hw_event
Ingo Molnar8326f442009-06-05 20:22:46 +0200268 * ID.
269 */
270
271#define C(x) PERF_COUNT_HW_CACHE_##x
272
273static u64 __read_mostly hw_cache_event_ids
274 [PERF_COUNT_HW_CACHE_MAX]
275 [PERF_COUNT_HW_CACHE_OP_MAX]
276 [PERF_COUNT_HW_CACHE_RESULT_MAX];
277
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +0900278static __initconst u64 nehalem_hw_cache_event_ids
Ingo Molnar8326f442009-06-05 20:22:46 +0200279 [PERF_COUNT_HW_CACHE_MAX]
280 [PERF_COUNT_HW_CACHE_OP_MAX]
281 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
282{
283 [ C(L1D) ] = {
284 [ C(OP_READ) ] = {
285 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
286 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
287 },
288 [ C(OP_WRITE) ] = {
289 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
290 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
291 },
292 [ C(OP_PREFETCH) ] = {
293 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
294 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
295 },
296 },
297 [ C(L1I ) ] = {
298 [ C(OP_READ) ] = {
Yong Wangfecc8ac2009-06-09 21:15:53 +0800299 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
Ingo Molnar8326f442009-06-05 20:22:46 +0200300 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
301 },
302 [ C(OP_WRITE) ] = {
303 [ C(RESULT_ACCESS) ] = -1,
304 [ C(RESULT_MISS) ] = -1,
305 },
306 [ C(OP_PREFETCH) ] = {
307 [ C(RESULT_ACCESS) ] = 0x0,
308 [ C(RESULT_MISS) ] = 0x0,
309 },
310 },
Peter Zijlstra8be6e8f2009-06-11 14:19:11 +0200311 [ C(LL ) ] = {
Ingo Molnar8326f442009-06-05 20:22:46 +0200312 [ C(OP_READ) ] = {
313 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
314 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
315 },
316 [ C(OP_WRITE) ] = {
317 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
318 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
319 },
320 [ C(OP_PREFETCH) ] = {
Peter Zijlstra8be6e8f2009-06-11 14:19:11 +0200321 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
322 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
Ingo Molnar8326f442009-06-05 20:22:46 +0200323 },
324 },
325 [ C(DTLB) ] = {
326 [ C(OP_READ) ] = {
327 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
328 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
329 },
330 [ C(OP_WRITE) ] = {
331 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
332 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
333 },
334 [ C(OP_PREFETCH) ] = {
335 [ C(RESULT_ACCESS) ] = 0x0,
336 [ C(RESULT_MISS) ] = 0x0,
337 },
338 },
339 [ C(ITLB) ] = {
340 [ C(OP_READ) ] = {
341 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
Yong Wangfecc8ac2009-06-09 21:15:53 +0800342 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
Ingo Molnar8326f442009-06-05 20:22:46 +0200343 },
344 [ C(OP_WRITE) ] = {
345 [ C(RESULT_ACCESS) ] = -1,
346 [ C(RESULT_MISS) ] = -1,
347 },
348 [ C(OP_PREFETCH) ] = {
349 [ C(RESULT_ACCESS) ] = -1,
350 [ C(RESULT_MISS) ] = -1,
351 },
352 },
353 [ C(BPU ) ] = {
354 [ C(OP_READ) ] = {
355 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
356 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
357 },
358 [ C(OP_WRITE) ] = {
359 [ C(RESULT_ACCESS) ] = -1,
360 [ C(RESULT_MISS) ] = -1,
361 },
362 [ C(OP_PREFETCH) ] = {
363 [ C(RESULT_ACCESS) ] = -1,
364 [ C(RESULT_MISS) ] = -1,
365 },
366 },
367};
368
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +0900369static __initconst u64 core2_hw_cache_event_ids
Ingo Molnar8326f442009-06-05 20:22:46 +0200370 [PERF_COUNT_HW_CACHE_MAX]
371 [PERF_COUNT_HW_CACHE_OP_MAX]
372 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
373{
Thomas Gleixner0312af82009-06-08 07:42:04 +0200374 [ C(L1D) ] = {
375 [ C(OP_READ) ] = {
376 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
377 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
378 },
379 [ C(OP_WRITE) ] = {
380 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
381 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
382 },
383 [ C(OP_PREFETCH) ] = {
384 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
385 [ C(RESULT_MISS) ] = 0,
386 },
387 },
388 [ C(L1I ) ] = {
389 [ C(OP_READ) ] = {
390 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
391 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
392 },
393 [ C(OP_WRITE) ] = {
394 [ C(RESULT_ACCESS) ] = -1,
395 [ C(RESULT_MISS) ] = -1,
396 },
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = 0,
399 [ C(RESULT_MISS) ] = 0,
400 },
401 },
Peter Zijlstra8be6e8f2009-06-11 14:19:11 +0200402 [ C(LL ) ] = {
Thomas Gleixner0312af82009-06-08 07:42:04 +0200403 [ C(OP_READ) ] = {
404 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
405 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
406 },
407 [ C(OP_WRITE) ] = {
408 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
409 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
410 },
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = 0,
413 [ C(RESULT_MISS) ] = 0,
414 },
415 },
416 [ C(DTLB) ] = {
417 [ C(OP_READ) ] = {
418 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
419 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
420 },
421 [ C(OP_WRITE) ] = {
422 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
423 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
424 },
425 [ C(OP_PREFETCH) ] = {
426 [ C(RESULT_ACCESS) ] = 0,
427 [ C(RESULT_MISS) ] = 0,
428 },
429 },
430 [ C(ITLB) ] = {
431 [ C(OP_READ) ] = {
432 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
433 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
434 },
435 [ C(OP_WRITE) ] = {
436 [ C(RESULT_ACCESS) ] = -1,
437 [ C(RESULT_MISS) ] = -1,
438 },
439 [ C(OP_PREFETCH) ] = {
440 [ C(RESULT_ACCESS) ] = -1,
441 [ C(RESULT_MISS) ] = -1,
442 },
443 },
444 [ C(BPU ) ] = {
445 [ C(OP_READ) ] = {
446 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
447 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
448 },
449 [ C(OP_WRITE) ] = {
450 [ C(RESULT_ACCESS) ] = -1,
451 [ C(RESULT_MISS) ] = -1,
452 },
453 [ C(OP_PREFETCH) ] = {
454 [ C(RESULT_ACCESS) ] = -1,
455 [ C(RESULT_MISS) ] = -1,
456 },
457 },
Ingo Molnar8326f442009-06-05 20:22:46 +0200458};
459
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +0900460static __initconst u64 atom_hw_cache_event_ids
Ingo Molnar8326f442009-06-05 20:22:46 +0200461 [PERF_COUNT_HW_CACHE_MAX]
462 [PERF_COUNT_HW_CACHE_OP_MAX]
463 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
464{
Thomas Gleixnerad689222009-06-08 09:30:41 +0200465 [ C(L1D) ] = {
466 [ C(OP_READ) ] = {
467 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
468 [ C(RESULT_MISS) ] = 0,
469 },
470 [ C(OP_WRITE) ] = {
Yong Wangfecc8ac2009-06-09 21:15:53 +0800471 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
Thomas Gleixnerad689222009-06-08 09:30:41 +0200472 [ C(RESULT_MISS) ] = 0,
473 },
474 [ C(OP_PREFETCH) ] = {
475 [ C(RESULT_ACCESS) ] = 0x0,
476 [ C(RESULT_MISS) ] = 0,
477 },
478 },
479 [ C(L1I ) ] = {
480 [ C(OP_READ) ] = {
Yong Wangfecc8ac2009-06-09 21:15:53 +0800481 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
482 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
Thomas Gleixnerad689222009-06-08 09:30:41 +0200483 },
484 [ C(OP_WRITE) ] = {
485 [ C(RESULT_ACCESS) ] = -1,
486 [ C(RESULT_MISS) ] = -1,
487 },
488 [ C(OP_PREFETCH) ] = {
489 [ C(RESULT_ACCESS) ] = 0,
490 [ C(RESULT_MISS) ] = 0,
491 },
492 },
Peter Zijlstra8be6e8f2009-06-11 14:19:11 +0200493 [ C(LL ) ] = {
Thomas Gleixnerad689222009-06-08 09:30:41 +0200494 [ C(OP_READ) ] = {
495 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
496 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
497 },
498 [ C(OP_WRITE) ] = {
499 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
500 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
501 },
502 [ C(OP_PREFETCH) ] = {
503 [ C(RESULT_ACCESS) ] = 0,
504 [ C(RESULT_MISS) ] = 0,
505 },
506 },
507 [ C(DTLB) ] = {
508 [ C(OP_READ) ] = {
Yong Wangfecc8ac2009-06-09 21:15:53 +0800509 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
Thomas Gleixnerad689222009-06-08 09:30:41 +0200510 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
511 },
512 [ C(OP_WRITE) ] = {
Yong Wangfecc8ac2009-06-09 21:15:53 +0800513 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
Thomas Gleixnerad689222009-06-08 09:30:41 +0200514 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
515 },
516 [ C(OP_PREFETCH) ] = {
517 [ C(RESULT_ACCESS) ] = 0,
518 [ C(RESULT_MISS) ] = 0,
519 },
520 },
521 [ C(ITLB) ] = {
522 [ C(OP_READ) ] = {
523 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
524 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
525 },
526 [ C(OP_WRITE) ] = {
527 [ C(RESULT_ACCESS) ] = -1,
528 [ C(RESULT_MISS) ] = -1,
529 },
530 [ C(OP_PREFETCH) ] = {
531 [ C(RESULT_ACCESS) ] = -1,
532 [ C(RESULT_MISS) ] = -1,
533 },
534 },
535 [ C(BPU ) ] = {
536 [ C(OP_READ) ] = {
537 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
538 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
539 },
540 [ C(OP_WRITE) ] = {
541 [ C(RESULT_ACCESS) ] = -1,
542 [ C(RESULT_MISS) ] = -1,
543 },
544 [ C(OP_PREFETCH) ] = {
545 [ C(RESULT_ACCESS) ] = -1,
546 [ C(RESULT_MISS) ] = -1,
547 },
548 },
Ingo Molnar8326f442009-06-05 20:22:46 +0200549};
550
Ingo Molnardfc65092009-09-21 11:31:35 +0200551static u64 intel_pmu_raw_event(u64 hw_event)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100552{
Peter Zijlstra82bae4f82009-03-13 12:21:31 +0100553#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
554#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
Peter Zijlstraff99be52009-05-25 17:39:03 +0200555#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
556#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
Peter Zijlstrafe9081c2009-10-08 11:56:07 +0200557#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100558
Ingo Molnar128f0482009-06-03 22:19:36 +0200559#define CORE_EVNTSEL_MASK \
Stephane Eranian1da53e02010-01-18 10:58:01 +0200560 (INTEL_ARCH_EVTSEL_MASK | \
561 INTEL_ARCH_UNIT_MASK | \
562 INTEL_ARCH_EDGE_MASK | \
563 INTEL_ARCH_INV_MASK | \
564 INTEL_ARCH_CNT_MASK)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100565
Ingo Molnardfc65092009-09-21 11:31:35 +0200566 return hw_event & CORE_EVNTSEL_MASK;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100567}
568
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +0900569static __initconst u64 amd_hw_cache_event_ids
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200570 [PERF_COUNT_HW_CACHE_MAX]
571 [PERF_COUNT_HW_CACHE_OP_MAX]
572 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
573{
574 [ C(L1D) ] = {
575 [ C(OP_READ) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530576 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
577 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200578 },
579 [ C(OP_WRITE) ] = {
Jaswinder Singh Rajputd9f2a5e2009-06-20 13:19:25 +0530580 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200581 [ C(RESULT_MISS) ] = 0,
582 },
583 [ C(OP_PREFETCH) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530584 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
585 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200586 },
587 },
588 [ C(L1I ) ] = {
589 [ C(OP_READ) ] = {
590 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
591 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
592 },
593 [ C(OP_WRITE) ] = {
594 [ C(RESULT_ACCESS) ] = -1,
595 [ C(RESULT_MISS) ] = -1,
596 },
597 [ C(OP_PREFETCH) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530598 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200599 [ C(RESULT_MISS) ] = 0,
600 },
601 },
Peter Zijlstra8be6e8f2009-06-11 14:19:11 +0200602 [ C(LL ) ] = {
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200603 [ C(OP_READ) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530604 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
605 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200606 },
607 [ C(OP_WRITE) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530608 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200609 [ C(RESULT_MISS) ] = 0,
610 },
611 [ C(OP_PREFETCH) ] = {
612 [ C(RESULT_ACCESS) ] = 0,
613 [ C(RESULT_MISS) ] = 0,
614 },
615 },
616 [ C(DTLB) ] = {
617 [ C(OP_READ) ] = {
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +0530618 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
619 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
Thomas Gleixnerf86748e2009-06-08 22:33:10 +0200620 },
621 [ C(OP_WRITE) ] = {
622 [ C(RESULT_ACCESS) ] = 0,
623 [ C(RESULT_MISS) ] = 0,
624 },
625 [ C(OP_PREFETCH) ] = {
626 [ C(RESULT_ACCESS) ] = 0,
627 [ C(RESULT_MISS) ] = 0,
628 },
629 },
630 [ C(ITLB) ] = {
631 [ C(OP_READ) ] = {
632 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
633 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
634 },
635 [ C(OP_WRITE) ] = {
636 [ C(RESULT_ACCESS) ] = -1,
637 [ C(RESULT_MISS) ] = -1,
638 },
639 [ C(OP_PREFETCH) ] = {
640 [ C(RESULT_ACCESS) ] = -1,
641 [ C(RESULT_MISS) ] = -1,
642 },
643 },
644 [ C(BPU ) ] = {
645 [ C(OP_READ) ] = {
646 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
647 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
648 },
649 [ C(OP_WRITE) ] = {
650 [ C(RESULT_ACCESS) ] = -1,
651 [ C(RESULT_MISS) ] = -1,
652 },
653 [ C(OP_PREFETCH) ] = {
654 [ C(RESULT_ACCESS) ] = -1,
655 [ C(RESULT_MISS) ] = -1,
656 },
657 },
658};
659
Ingo Molnar241771e2008-12-03 10:39:53 +0100660/*
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530661 * AMD Performance Monitor K7 and later.
662 */
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100663static const u64 amd_perfmon_event_map[] =
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530664{
Peter Zijlstraf4dbfa82009-06-11 14:06:28 +0200665 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
666 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
667 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
668 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
669 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
670 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530671};
672
Ingo Molnardfc65092009-09-21 11:31:35 +0200673static u64 amd_pmu_event_map(int hw_event)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530674{
Ingo Molnardfc65092009-09-21 11:31:35 +0200675 return amd_perfmon_event_map[hw_event];
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530676}
677
Ingo Molnardfc65092009-09-21 11:31:35 +0200678static u64 amd_pmu_raw_event(u64 hw_event)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100679{
Peter Zijlstra82bae4f82009-03-13 12:21:31 +0100680#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
681#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
Peter Zijlstraff99be52009-05-25 17:39:03 +0200682#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
683#define K7_EVNTSEL_INV_MASK 0x000800000ULL
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200684#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100685
686#define K7_EVNTSEL_MASK \
687 (K7_EVNTSEL_EVENT_MASK | \
688 K7_EVNTSEL_UNIT_MASK | \
Peter Zijlstraff99be52009-05-25 17:39:03 +0200689 K7_EVNTSEL_EDGE_MASK | \
690 K7_EVNTSEL_INV_MASK | \
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200691 K7_EVNTSEL_REG_MASK)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100692
Ingo Molnardfc65092009-09-21 11:31:35 +0200693 return hw_event & K7_EVNTSEL_MASK;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100694}
695
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530696/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200697 * Propagate event elapsed time into the generic event.
698 * Can only be executed on the CPU where the event is active.
Ingo Molnaree060942008-12-13 09:00:03 +0100699 * Returns the delta events processed.
700 */
Robert Richter4b7bfd02009-04-29 12:47:22 +0200701static u64
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200702x86_perf_event_update(struct perf_event *event,
703 struct hw_perf_event *hwc, int idx)
Ingo Molnaree060942008-12-13 09:00:03 +0100704{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200705 int shift = 64 - x86_pmu.event_bits;
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200706 u64 prev_raw_count, new_raw_count;
707 s64 delta;
Ingo Molnaree060942008-12-13 09:00:03 +0100708
Markus Metzger30dd5682009-07-21 15:56:48 +0200709 if (idx == X86_PMC_IDX_FIXED_BTS)
710 return 0;
711
Ingo Molnaree060942008-12-13 09:00:03 +0100712 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200713 * Careful: an NMI might modify the previous event value.
Ingo Molnaree060942008-12-13 09:00:03 +0100714 *
715 * Our tactic to handle this is to first atomically read and
716 * exchange a new raw count - then add that new-prev delta
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200717 * count to the generic event atomically:
Ingo Molnaree060942008-12-13 09:00:03 +0100718 */
719again:
720 prev_raw_count = atomic64_read(&hwc->prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200721 rdmsrl(hwc->event_base + idx, new_raw_count);
Ingo Molnaree060942008-12-13 09:00:03 +0100722
723 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
724 new_raw_count) != prev_raw_count)
725 goto again;
726
727 /*
728 * Now we have the new raw value and have updated the prev
729 * timestamp already. We can now calculate the elapsed delta
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200730 * (event-)time and add that to the generic event.
Ingo Molnaree060942008-12-13 09:00:03 +0100731 *
732 * Careful, not all hw sign-extends above the physical width
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200733 * of the count.
Ingo Molnaree060942008-12-13 09:00:03 +0100734 */
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200735 delta = (new_raw_count << shift) - (prev_raw_count << shift);
736 delta >>= shift;
Ingo Molnaree060942008-12-13 09:00:03 +0100737
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200738 atomic64_add(delta, &event->count);
Ingo Molnaree060942008-12-13 09:00:03 +0100739 atomic64_sub(delta, &hwc->period_left);
Robert Richter4b7bfd02009-04-29 12:47:22 +0200740
741 return new_raw_count;
Ingo Molnaree060942008-12-13 09:00:03 +0100742}
743
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200744static atomic_t active_events;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200745static DEFINE_MUTEX(pmc_reserve_mutex);
746
747static bool reserve_pmc_hardware(void)
748{
Ingo Molnar04da8a42009-08-11 10:40:08 +0200749#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200750 int i;
751
752 if (nmi_watchdog == NMI_LOCAL_APIC)
753 disable_lapic_nmi_watchdog();
754
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200755 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200756 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200757 goto perfctr_fail;
758 }
759
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200760 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200761 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200762 goto eventsel_fail;
763 }
Ingo Molnar04da8a42009-08-11 10:40:08 +0200764#endif
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200765
766 return true;
767
Ingo Molnar04da8a42009-08-11 10:40:08 +0200768#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200769eventsel_fail:
770 for (i--; i >= 0; i--)
Robert Richter4a06bd82009-04-29 12:47:11 +0200771 release_evntsel_nmi(x86_pmu.eventsel + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200772
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200773 i = x86_pmu.num_events;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200774
775perfctr_fail:
776 for (i--; i >= 0; i--)
Robert Richter4a06bd82009-04-29 12:47:11 +0200777 release_perfctr_nmi(x86_pmu.perfctr + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200778
779 if (nmi_watchdog == NMI_LOCAL_APIC)
780 enable_lapic_nmi_watchdog();
781
782 return false;
Ingo Molnar04da8a42009-08-11 10:40:08 +0200783#endif
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200784}
785
786static void release_pmc_hardware(void)
787{
Ingo Molnar04da8a42009-08-11 10:40:08 +0200788#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200789 int i;
790
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200791 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200792 release_perfctr_nmi(x86_pmu.perfctr + i);
793 release_evntsel_nmi(x86_pmu.eventsel + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200794 }
795
796 if (nmi_watchdog == NMI_LOCAL_APIC)
797 enable_lapic_nmi_watchdog();
Ingo Molnar04da8a42009-08-11 10:40:08 +0200798#endif
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200799}
800
Markus Metzger30dd5682009-07-21 15:56:48 +0200801static inline bool bts_available(void)
802{
803 return x86_pmu.enable_bts != NULL;
804}
805
806static inline void init_debug_store_on_cpu(int cpu)
807{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200808 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
Markus Metzger30dd5682009-07-21 15:56:48 +0200809
810 if (!ds)
811 return;
812
813 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +0200814 (u32)((u64)(unsigned long)ds),
815 (u32)((u64)(unsigned long)ds >> 32));
Markus Metzger30dd5682009-07-21 15:56:48 +0200816}
817
818static inline void fini_debug_store_on_cpu(int cpu)
819{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200820 if (!per_cpu(cpu_hw_events, cpu).ds)
Markus Metzger30dd5682009-07-21 15:56:48 +0200821 return;
822
823 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
824}
825
826static void release_bts_hardware(void)
827{
828 int cpu;
829
830 if (!bts_available())
831 return;
832
833 get_online_cpus();
834
835 for_each_online_cpu(cpu)
836 fini_debug_store_on_cpu(cpu);
837
838 for_each_possible_cpu(cpu) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200839 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
Markus Metzger30dd5682009-07-21 15:56:48 +0200840
841 if (!ds)
842 continue;
843
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200844 per_cpu(cpu_hw_events, cpu).ds = NULL;
Markus Metzger30dd5682009-07-21 15:56:48 +0200845
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +0200846 kfree((void *)(unsigned long)ds->bts_buffer_base);
Markus Metzger30dd5682009-07-21 15:56:48 +0200847 kfree(ds);
848 }
849
850 put_online_cpus();
851}
852
853static int reserve_bts_hardware(void)
854{
855 int cpu, err = 0;
856
857 if (!bts_available())
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +0200858 return 0;
Markus Metzger30dd5682009-07-21 15:56:48 +0200859
860 get_online_cpus();
861
862 for_each_possible_cpu(cpu) {
863 struct debug_store *ds;
864 void *buffer;
865
866 err = -ENOMEM;
867 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
868 if (unlikely(!buffer))
869 break;
870
871 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
872 if (unlikely(!ds)) {
873 kfree(buffer);
874 break;
875 }
876
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +0200877 ds->bts_buffer_base = (u64)(unsigned long)buffer;
Markus Metzger30dd5682009-07-21 15:56:48 +0200878 ds->bts_index = ds->bts_buffer_base;
879 ds->bts_absolute_maximum =
880 ds->bts_buffer_base + BTS_BUFFER_SIZE;
881 ds->bts_interrupt_threshold =
882 ds->bts_absolute_maximum - BTS_OVFL_TH;
883
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200884 per_cpu(cpu_hw_events, cpu).ds = ds;
Markus Metzger30dd5682009-07-21 15:56:48 +0200885 err = 0;
886 }
887
888 if (err)
889 release_bts_hardware();
890 else {
891 for_each_online_cpu(cpu)
892 init_debug_store_on_cpu(cpu);
893 }
894
895 put_online_cpus();
896
897 return err;
898}
899
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200900static void hw_perf_event_destroy(struct perf_event *event)
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200901{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200902 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200903 release_pmc_hardware();
Markus Metzger30dd5682009-07-21 15:56:48 +0200904 release_bts_hardware();
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200905 mutex_unlock(&pmc_reserve_mutex);
906 }
907}
908
Robert Richter85cf9db2009-04-29 12:47:20 +0200909static inline int x86_pmu_initialized(void)
910{
911 return x86_pmu.handle_irq != NULL;
912}
913
Ingo Molnar8326f442009-06-05 20:22:46 +0200914static inline int
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200915set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
Ingo Molnar8326f442009-06-05 20:22:46 +0200916{
917 unsigned int cache_type, cache_op, cache_result;
918 u64 config, val;
919
920 config = attr->config;
921
922 cache_type = (config >> 0) & 0xff;
923 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
924 return -EINVAL;
925
926 cache_op = (config >> 8) & 0xff;
927 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
928 return -EINVAL;
929
930 cache_result = (config >> 16) & 0xff;
931 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
932 return -EINVAL;
933
934 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
935
936 if (val == 0)
937 return -ENOENT;
938
939 if (val == -1)
940 return -EINVAL;
941
942 hwc->config |= val;
943
944 return 0;
945}
946
Markus Metzger30dd5682009-07-21 15:56:48 +0200947static void intel_pmu_enable_bts(u64 config)
948{
949 unsigned long debugctlmsr;
950
951 debugctlmsr = get_debugctlmsr();
952
953 debugctlmsr |= X86_DEBUGCTL_TR;
954 debugctlmsr |= X86_DEBUGCTL_BTS;
955 debugctlmsr |= X86_DEBUGCTL_BTINT;
956
957 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
958 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
959
960 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
961 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
962
963 update_debugctlmsr(debugctlmsr);
964}
965
966static void intel_pmu_disable_bts(void)
967{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200968 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Markus Metzger30dd5682009-07-21 15:56:48 +0200969 unsigned long debugctlmsr;
970
971 if (!cpuc->ds)
972 return;
973
974 debugctlmsr = get_debugctlmsr();
975
976 debugctlmsr &=
977 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
978 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
979
980 update_debugctlmsr(debugctlmsr);
981}
982
Ingo Molnaree060942008-12-13 09:00:03 +0100983/*
Peter Zijlstra0d486962009-06-02 19:22:16 +0200984 * Setup the hardware configuration for a given attr_type
Ingo Molnar241771e2008-12-03 10:39:53 +0100985 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200986static int __hw_perf_event_init(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +0100987{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200988 struct perf_event_attr *attr = &event->attr;
989 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200990 u64 config;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200991 int err;
Ingo Molnar241771e2008-12-03 10:39:53 +0100992
Robert Richter85cf9db2009-04-29 12:47:20 +0200993 if (!x86_pmu_initialized())
994 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +0100995
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200996 err = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200997 if (!atomic_inc_not_zero(&active_events)) {
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200998 mutex_lock(&pmc_reserve_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200999 if (atomic_read(&active_events) == 0) {
Markus Metzger30dd5682009-07-21 15:56:48 +02001000 if (!reserve_pmc_hardware())
1001 err = -EBUSY;
1002 else
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +02001003 err = reserve_bts_hardware();
Markus Metzger30dd5682009-07-21 15:56:48 +02001004 }
1005 if (!err)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001006 atomic_inc(&active_events);
Peter Zijlstra4e935e42009-03-30 19:07:16 +02001007 mutex_unlock(&pmc_reserve_mutex);
1008 }
1009 if (err)
1010 return err;
1011
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001012 event->destroy = hw_perf_event_destroy;
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001013
Ingo Molnar241771e2008-12-03 10:39:53 +01001014 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001015 * Generate PMC IRQs:
Ingo Molnar241771e2008-12-03 10:39:53 +01001016 * (keep 'enabled' bit clear for now)
1017 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001018 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
Ingo Molnar241771e2008-12-03 10:39:53 +01001019
Stephane Eranianb6900812009-10-06 16:42:09 +02001020 hwc->idx = -1;
1021
Ingo Molnar241771e2008-12-03 10:39:53 +01001022 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001023 * Count user and OS events unless requested not to.
1024 */
Peter Zijlstra0d486962009-06-02 19:22:16 +02001025 if (!attr->exclude_user)
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001026 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
Peter Zijlstra0d486962009-06-02 19:22:16 +02001027 if (!attr->exclude_kernel)
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001028 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1029
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001030 if (!hwc->sample_period) {
Peter Zijlstrab23f3322009-06-02 15:13:03 +02001031 hwc->sample_period = x86_pmu.max_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001032 hwc->last_period = hwc->sample_period;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001033 atomic64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001034 } else {
1035 /*
1036 * If we have a PMU initialized but no APIC
1037 * interrupts, we cannot sample hardware
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001038 * events (user-space has to fall back and
1039 * sample via a hrtimer based software event):
Ingo Molnar04da8a42009-08-11 10:40:08 +02001040 */
1041 if (!x86_pmu.apic)
1042 return -EOPNOTSUPP;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +02001043 }
Ingo Molnard2517a42009-05-17 10:04:45 +02001044
Ingo Molnar241771e2008-12-03 10:39:53 +01001045 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001046 * Raw hw_event type provide the config in the hw_event structure
Ingo Molnar241771e2008-12-03 10:39:53 +01001047 */
Ingo Molnara21ca2c2009-06-06 09:58:57 +02001048 if (attr->type == PERF_TYPE_RAW) {
1049 hwc->config |= x86_pmu.raw_event(attr->config);
Ingo Molnar8326f442009-06-05 20:22:46 +02001050 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001051 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001052
Ingo Molnar8326f442009-06-05 20:22:46 +02001053 if (attr->type == PERF_TYPE_HW_CACHE)
1054 return set_ext_hw_attr(hwc, attr);
1055
1056 if (attr->config >= x86_pmu.max_events)
1057 return -EINVAL;
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001058
Ingo Molnar8326f442009-06-05 20:22:46 +02001059 /*
1060 * The generic map:
1061 */
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001062 config = x86_pmu.event_map(attr->config);
1063
1064 if (config == 0)
1065 return -ENOENT;
1066
1067 if (config == -1LL)
1068 return -EINVAL;
1069
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +02001070 /*
1071 * Branch tracing:
1072 */
1073 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
markus.t.metzger@intel.com16531922009-09-02 16:04:48 +02001074 (hwc->sample_period == 1)) {
1075 /* BTS is not supported by this architecture. */
1076 if (!bts_available())
1077 return -EOPNOTSUPP;
1078
1079 /* BTS is currently only allowed for user-mode. */
1080 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1081 return -EOPNOTSUPP;
1082 }
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +02001083
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001084 hwc->config |= config;
Peter Zijlstra4e935e42009-03-30 19:07:16 +02001085
Ingo Molnar241771e2008-12-03 10:39:53 +01001086 return 0;
1087}
1088
Vince Weaver11d15782009-07-08 17:46:14 -04001089static void p6_pmu_disable_all(void)
1090{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001091 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001092 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001093
1094 if (!cpuc->enabled)
1095 return;
1096
1097 cpuc->enabled = 0;
1098 barrier();
1099
1100 /* p6 only has one enable register */
1101 rdmsrl(MSR_P6_EVNTSEL0, val);
1102 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1103 wrmsrl(MSR_P6_EVNTSEL0, val);
1104}
1105
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001106static void intel_pmu_disable_all(void)
Thomas Gleixner4ac13292008-12-09 21:43:39 +01001107{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001108 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Markus Metzger30dd5682009-07-21 15:56:48 +02001109
1110 if (!cpuc->enabled)
1111 return;
1112
1113 cpuc->enabled = 0;
1114 barrier();
1115
Ingo Molnar862a1a52008-12-17 13:09:20 +01001116 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
Markus Metzger30dd5682009-07-21 15:56:48 +02001117
1118 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1119 intel_pmu_disable_bts();
Thomas Gleixner4ac13292008-12-09 21:43:39 +01001120}
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301121
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001122static void amd_pmu_disable_all(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301123{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001124 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001125 int idx;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001126
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001127 if (!cpuc->enabled)
1128 return;
1129
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001130 cpuc->enabled = 0;
Peter Zijlstra60b3df92009-03-13 12:21:30 +01001131 /*
1132 * ensure we write the disable before we start disabling the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001133 * events proper, so that amd_pmu_enable_event() does the
Robert Richter5f4ec282009-04-29 12:47:04 +02001134 * right thing.
Peter Zijlstra60b3df92009-03-13 12:21:30 +01001135 */
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001136 barrier();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301137
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001138 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001139 u64 val;
1140
Robert Richter43f62012009-04-29 16:55:56 +02001141 if (!test_bit(idx, cpuc->active_mask))
Robert Richter4295ee62009-04-29 12:47:01 +02001142 continue;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301143 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
Robert Richter4295ee62009-04-29 12:47:01 +02001144 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1145 continue;
1146 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1147 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301148 }
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301149}
1150
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001151void hw_perf_disable(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301152{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001153 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1154
Robert Richter85cf9db2009-04-29 12:47:20 +02001155 if (!x86_pmu_initialized())
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001156 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001157
1158 if (cpuc->enabled)
1159 cpuc->n_added = 0;
1160
1161 x86_pmu.disable_all();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301162}
Ingo Molnar241771e2008-12-03 10:39:53 +01001163
Vince Weaver11d15782009-07-08 17:46:14 -04001164static void p6_pmu_enable_all(void)
1165{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001166 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001167 unsigned long val;
1168
1169 if (cpuc->enabled)
1170 return;
1171
1172 cpuc->enabled = 1;
1173 barrier();
1174
1175 /* p6 only has one enable register */
1176 rdmsrl(MSR_P6_EVNTSEL0, val);
1177 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1178 wrmsrl(MSR_P6_EVNTSEL0, val);
1179}
1180
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001181static void intel_pmu_enable_all(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301182{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Markus Metzger30dd5682009-07-21 15:56:48 +02001184
1185 if (cpuc->enabled)
1186 return;
1187
1188 cpuc->enabled = 1;
1189 barrier();
1190
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001191 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
Markus Metzger30dd5682009-07-21 15:56:48 +02001192
1193 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001194 struct perf_event *event =
1195 cpuc->events[X86_PMC_IDX_FIXED_BTS];
Markus Metzger30dd5682009-07-21 15:56:48 +02001196
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001197 if (WARN_ON_ONCE(!event))
Markus Metzger30dd5682009-07-21 15:56:48 +02001198 return;
1199
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001200 intel_pmu_enable_bts(event->hw.config);
Markus Metzger30dd5682009-07-21 15:56:48 +02001201 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301202}
1203
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001204static void amd_pmu_enable_all(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301205{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001206 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301207 int idx;
1208
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001209 if (cpuc->enabled)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001210 return;
1211
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001212 cpuc->enabled = 1;
1213 barrier();
1214
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001215 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1216 struct perf_event *event = cpuc->events[idx];
Robert Richter4295ee62009-04-29 12:47:01 +02001217 u64 val;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001218
Robert Richter43f62012009-04-29 16:55:56 +02001219 if (!test_bit(idx, cpuc->active_mask))
Robert Richter4295ee62009-04-29 12:47:01 +02001220 continue;
Peter Zijlstra984b8382009-07-10 09:59:56 +02001221
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001222 val = event->hw.config;
Robert Richter4295ee62009-04-29 12:47:01 +02001223 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1224 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301225 }
1226}
1227
Stephane Eranian1da53e02010-01-18 10:58:01 +02001228static const struct pmu pmu;
1229
1230static inline int is_x86_event(struct perf_event *event)
1231{
1232 return event->pmu == &pmu;
1233}
1234
1235static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1236{
1237 int i, j , w, num;
1238 int weight, wmax;
1239 unsigned long *c;
Peter Zijlstra81269a02010-01-22 14:55:22 +01001240 unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Stephane Eranian1da53e02010-01-18 10:58:01 +02001241 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1242 struct hw_perf_event *hwc;
1243
1244 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1245
1246 for (i = 0; i < n; i++) {
1247 x86_pmu.get_event_constraints(cpuc,
1248 cpuc->event_list[i],
1249 constraints[i]);
1250 }
1251
1252 /*
Stephane Eranian81130702010-01-21 17:39:01 +02001253 * fastpath, try to reuse previous register
1254 */
1255 for (i = 0, num = n; i < n; i++, num--) {
1256 hwc = &cpuc->event_list[i]->hw;
Peter Zijlstra81269a02010-01-22 14:55:22 +01001257 c = constraints[i];
Stephane Eranian81130702010-01-21 17:39:01 +02001258
1259 /* never assigned */
1260 if (hwc->idx == -1)
1261 break;
1262
1263 /* constraint still honored */
1264 if (!test_bit(hwc->idx, c))
1265 break;
1266
1267 /* not already used */
1268 if (test_bit(hwc->idx, used_mask))
1269 break;
1270
1271#if 0
1272 pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
1273 smp_processor_id(),
1274 hwc->config,
1275 hwc->idx,
1276 assign ? 'y' : 'n');
1277#endif
1278
1279 set_bit(hwc->idx, used_mask);
1280 if (assign)
1281 assign[i] = hwc->idx;
1282 }
1283 if (!num)
1284 goto done;
1285
1286 /*
1287 * begin slow path
1288 */
1289
1290 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1291
1292 /*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001293 * weight = number of possible counters
1294 *
1295 * 1 = most constrained, only works on one counter
1296 * wmax = least constrained, works on any counter
1297 *
1298 * assign events to counters starting with most
1299 * constrained events.
1300 */
1301 wmax = x86_pmu.num_events;
1302
1303 /*
1304 * when fixed event counters are present,
1305 * wmax is incremented by 1 to account
1306 * for one more choice
1307 */
1308 if (x86_pmu.num_events_fixed)
1309 wmax++;
1310
Stephane Eranian81130702010-01-21 17:39:01 +02001311 for (w = 1, num = n; num && w <= wmax; w++) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02001312 /* for each event */
Stephane Eranian81130702010-01-21 17:39:01 +02001313 for (i = 0; num && i < n; i++) {
Peter Zijlstra81269a02010-01-22 14:55:22 +01001314 c = constraints[i];
Stephane Eranian1da53e02010-01-18 10:58:01 +02001315 hwc = &cpuc->event_list[i]->hw;
1316
1317 weight = bitmap_weight(c, X86_PMC_IDX_MAX);
1318 if (weight != w)
1319 continue;
1320
Stephane Eranian1da53e02010-01-18 10:58:01 +02001321 for_each_bit(j, c, X86_PMC_IDX_MAX) {
1322 if (!test_bit(j, used_mask))
1323 break;
1324 }
1325
1326 if (j == X86_PMC_IDX_MAX)
1327 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001328
1329#if 0
Stephane Eranian81130702010-01-21 17:39:01 +02001330 pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
Stephane Eranian1da53e02010-01-18 10:58:01 +02001331 smp_processor_id(),
1332 hwc->config,
1333 j,
1334 assign ? 'y' : 'n');
1335#endif
1336
Stephane Eranian81130702010-01-21 17:39:01 +02001337 set_bit(j, used_mask);
1338
Stephane Eranian1da53e02010-01-18 10:58:01 +02001339 if (assign)
1340 assign[i] = j;
1341 num--;
1342 }
1343 }
Stephane Eranian81130702010-01-21 17:39:01 +02001344done:
Stephane Eranian1da53e02010-01-18 10:58:01 +02001345 /*
1346 * scheduling failed or is just a simulation,
1347 * free resources if necessary
1348 */
1349 if (!assign || num) {
1350 for (i = 0; i < n; i++) {
1351 if (x86_pmu.put_event_constraints)
1352 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1353 }
1354 }
1355 return num ? -ENOSPC : 0;
1356}
1357
1358/*
1359 * dogrp: true if must collect siblings events (group)
1360 * returns total number of events and error code
1361 */
1362static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1363{
1364 struct perf_event *event;
1365 int n, max_count;
1366
1367 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1368
1369 /* current number of events already accepted */
1370 n = cpuc->n_events;
1371
1372 if (is_x86_event(leader)) {
1373 if (n >= max_count)
1374 return -ENOSPC;
1375 cpuc->event_list[n] = leader;
1376 n++;
1377 }
1378 if (!dogrp)
1379 return n;
1380
1381 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1382 if (!is_x86_event(event) ||
Stephane Eranian81130702010-01-21 17:39:01 +02001383 event->state <= PERF_EVENT_STATE_OFF)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001384 continue;
1385
1386 if (n >= max_count)
1387 return -ENOSPC;
1388
1389 cpuc->event_list[n] = event;
1390 n++;
1391 }
1392 return n;
1393}
1394
1395
1396static inline void x86_assign_hw_event(struct perf_event *event,
1397 struct hw_perf_event *hwc, int idx)
1398{
1399 hwc->idx = idx;
1400
1401 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1402 hwc->config_base = 0;
1403 hwc->event_base = 0;
1404 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1405 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1406 /*
1407 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1408 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1409 */
1410 hwc->event_base =
1411 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1412 } else {
1413 hwc->config_base = x86_pmu.eventsel;
1414 hwc->event_base = x86_pmu.perfctr;
1415 }
1416}
1417
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001418void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +01001419{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001420 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1421 struct perf_event *event;
1422 struct hw_perf_event *hwc;
1423 int i;
1424
Robert Richter85cf9db2009-04-29 12:47:20 +02001425 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +01001426 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001427 if (cpuc->n_added) {
1428 /*
1429 * apply assignment obtained either from
1430 * hw_perf_group_sched_in() or x86_pmu_enable()
1431 *
1432 * step1: save events moving to new counters
1433 * step2: reprogram moved events into new counters
1434 */
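		/*
		 * Note the ordering: the first pass frees every counter
		 * vacated by a moved event before the second pass programs
		 * the new placements, so a counter is never reprogrammed
		 * while its previous occupant is still active.
		 */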
1435 for (i = 0; i < cpuc->n_events; i++) {
1436
1437 event = cpuc->event_list[i];
1438 hwc = &event->hw;
1439
1440 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1441 continue;
1442
1443 x86_pmu.disable(hwc, hwc->idx);
1444
1445 clear_bit(hwc->idx, cpuc->active_mask);
1446 barrier();
1447 cpuc->events[hwc->idx] = NULL;
1448
1449 x86_perf_event_update(event, hwc, hwc->idx);
1450
1451 hwc->idx = -1;
1452 }
1453
1454 for (i = 0; i < cpuc->n_events; i++) {
1455
1456 event = cpuc->event_list[i];
1457 hwc = &event->hw;
1458
1459 if (hwc->idx == -1) {
1460 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1461 x86_perf_event_set_period(event, hwc, hwc->idx);
1462 }
1463 /*
1464 * need to mark as active because x86_pmu_disable()
1465	 * clears active_mask and events[] yet it preserves
1466 * idx
1467 */
1468 set_bit(hwc->idx, cpuc->active_mask);
1469 cpuc->events[hwc->idx] = event;
1470
1471 x86_pmu.enable(hwc, hwc->idx);
1472 perf_event_update_userpage(event);
1473 }
1474 cpuc->n_added = 0;
1475 perf_events_lapic_init();
1476 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001477 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +01001478}
Ingo Molnaree060942008-12-13 09:00:03 +01001479
Robert Richter19d84da2009-04-29 12:47:25 +02001480static inline u64 intel_pmu_get_status(void)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001481{
1482 u64 status;
1483
1484 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1485
1486 return status;
1487}
1488
Robert Richterdee5d902009-04-29 12:47:07 +02001489static inline void intel_pmu_ack_status(u64 ack)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001490{
1491 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1492}
1493
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001494static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001495{
Vince Weaver11d15782009-07-08 17:46:14 -04001496 (void)checking_wrmsrl(hwc->config_base + idx,
Robert Richter7c90cc42009-04-29 12:47:18 +02001497 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001498}
1499
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001500static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001501{
Vince Weaver11d15782009-07-08 17:46:14 -04001502 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001503}
1504
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001505static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001506intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001507{
1508 int idx = __idx - X86_PMC_IDX_FIXED;
1509 u64 ctrl_val, mask;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001510
1511 mask = 0xfULL << (idx * 4);
1512
1513 rdmsrl(hwc->config_base, ctrl_val);
1514 ctrl_val &= ~mask;
Vince Weaver11d15782009-07-08 17:46:14 -04001515 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1516}
1517
1518static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001519p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001520{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001521 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1522 u64 val = P6_NOP_EVENT;
Vince Weaver11d15782009-07-08 17:46:14 -04001523
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001524 if (cpuc->enabled)
1525 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
Vince Weaver11d15782009-07-08 17:46:14 -04001526
1527 (void)checking_wrmsrl(hwc->config_base + idx, val);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001528}
1529
1530static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001531intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001532{
Markus Metzger30dd5682009-07-21 15:56:48 +02001533 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1534 intel_pmu_disable_bts();
1535 return;
1536 }
1537
Robert Richterd4369892009-04-29 12:47:19 +02001538 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1539 intel_pmu_disable_fixed(hwc, idx);
1540 return;
1541 }
1542
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001543 x86_pmu_disable_event(hwc, idx);
Robert Richterd4369892009-04-29 12:47:19 +02001544}
1545
1546static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001547amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Robert Richterd4369892009-04-29 12:47:19 +02001548{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001549 x86_pmu_disable_event(hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001550}
1551
Tejun Heo245b2e72009-06-24 15:13:48 +09001552static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001553
Ingo Molnaree060942008-12-13 09:00:03 +01001554/*
1555 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001556 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +01001557 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001558static int
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001559x86_perf_event_set_period(struct perf_event *event,
1560 struct hw_perf_event *hwc, int idx)
Ingo Molnar241771e2008-12-03 10:39:53 +01001561{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001562 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001563 s64 period = hwc->sample_period;
1564 int err, ret = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001565
Markus Metzger30dd5682009-07-21 15:56:48 +02001566 if (idx == X86_PMC_IDX_FIXED_BTS)
1567 return 0;
1568
Ingo Molnaree060942008-12-13 09:00:03 +01001569 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001570 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +01001571 */
1572 if (unlikely(left <= -period)) {
1573 left = period;
1574 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001575 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001576 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001577 }
1578
1579 if (unlikely(left <= 0)) {
1580 left += period;
1581 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001582 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001583 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001584 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001585 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001586	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001587 */
1588 if (unlikely(left < 2))
1589 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +01001590
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001591 if (left > x86_pmu.max_period)
1592 left = x86_pmu.max_period;
1593
Tejun Heo245b2e72009-06-24 15:13:48 +09001594 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +01001595
1596 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +01001598	 * mark it to be able to extract future deltas:
1599 */
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001600 atomic64_set(&hwc->prev_count, (u64)-left);
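	/*
	 * The counter register itself is armed with -left below, so it
	 * overflows (and raises the PMI) after another 'left' events.
	 */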
Ingo Molnaree060942008-12-13 09:00:03 +01001601
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001602 err = checking_wrmsrl(hwc->event_base + idx,
1603 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001604
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001605 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001606
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001607 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001608}
1609
1610static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001611intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001612{
1613 int idx = __idx - X86_PMC_IDX_FIXED;
1614 u64 ctrl_val, bits, mask;
1615 int err;
1616
1617 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001618 * Enable IRQ generation (0x8),
1619 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1620 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001621 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001622 bits = 0x8ULL;
1623 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1624 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001625 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1626 bits |= 0x1;
1627 bits <<= (idx * 4);
1628 mask = 0xfULL << (idx * 4);
1629
1630 rdmsrl(hwc->config_base, ctrl_val);
1631 ctrl_val &= ~mask;
1632 ctrl_val |= bits;
1633 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001634}
1635
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001636static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001637{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001638 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001639 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001640
Peter Zijlstra984b8382009-07-10 09:59:56 +02001641 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001642 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001643 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1644
1645 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001646}
1647
1648
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001649static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001650{
Markus Metzger30dd5682009-07-21 15:56:48 +02001651 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001652 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001653 return;
1654
1655 intel_pmu_enable_bts(hwc->config);
1656 return;
1657 }
1658
Robert Richter7c90cc42009-04-29 12:47:18 +02001659 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1660 intel_pmu_enable_fixed(hwc, idx);
1661 return;
1662 }
1663
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001664 x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001665}
1666
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001667static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001668{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001669 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001670
1671 if (cpuc->enabled)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001672 x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001673}
1674
Ingo Molnaree060942008-12-13 09:00:03 +01001675/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001676 * activate a single event
1677 *
1678 * The event is added to the group of enabled events
1679	 * but only if it can be scheduled with existing events.
1680 *
1681	 * Called with the PMU disabled. On success the caller is then
1682	 * guaranteed to call perf_enable() and hw_perf_enable()
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001683 */
1684static int x86_pmu_enable(struct perf_event *event)
1685{
1686 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001687 struct hw_perf_event *hwc;
1688 int assign[X86_PMC_IDX_MAX];
1689 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001690
Stephane Eranian1da53e02010-01-18 10:58:01 +02001691 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001692
Stephane Eranian1da53e02010-01-18 10:58:01 +02001693 n0 = cpuc->n_events;
1694 n = collect_events(cpuc, event, false);
1695 if (n < 0)
1696 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001697
Stephane Eranian1da53e02010-01-18 10:58:01 +02001698 ret = x86_schedule_events(cpuc, n, assign);
1699 if (ret)
1700 return ret;
1701 /*
1702 * copy new assignment, now we know it is possible
1703 * will be used by hw_perf_enable()
1704 */
1705 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001706
Stephane Eranian1da53e02010-01-18 10:58:01 +02001707 cpuc->n_events = n;
1708 cpuc->n_added = n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001709
Stephane Eranian1da53e02010-01-18 10:58:01 +02001710 if (hwc->idx != -1)
1711 x86_perf_event_set_period(event, hwc, hwc->idx);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001712
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001713 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001714}
1715
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001716static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001717{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001718 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1719 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001720
1721 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001722 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001723 return;
1724
1725 x86_pmu.enable(hwc, hwc->idx);
1726}
1727
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001728void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001729{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001730 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001731 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001732 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001733 int cpu, idx;
1734
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001735 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001736 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001737
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001738 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001739
1740 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001741 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001742
Robert Richterfaa28ae2009-04-29 12:47:13 +02001743 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301744 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1745 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1746 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1747 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001748
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301749 pr_info("\n");
1750 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1751 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1752 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1753 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301754 }
Stephane Eranian1da53e02010-01-18 10:58:01 +02001755 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001756
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001757 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001758 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1759 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001760
Tejun Heo245b2e72009-06-24 15:13:48 +09001761 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001762
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301763 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001764 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301765 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001766 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301767 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001768 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001769 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001770 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001771 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1772
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301773 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001774 cpu, idx, pmc_count);
1775 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001776 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001777}
1778
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001779static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001780{
1781 struct debug_store *ds = cpuc->ds;
1782 struct bts_record {
1783 u64 from;
1784 u64 to;
1785 u64 flags;
1786 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001787 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001788 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001789 struct perf_output_handle handle;
1790 struct perf_event_header header;
1791 struct perf_sample_data data;
1792 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001793
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001794 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001795 return;
1796
1797 if (!ds)
1798 return;
1799
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001800 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1801 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001802
Markus Metzger5622f292009-09-15 13:00:23 +02001803 if (top <= at)
1804 return;
1805
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001806 ds->bts_index = ds->bts_buffer_base;
1807
Markus Metzger30dd5682009-07-21 15:56:48 +02001808
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001809 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001810 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001811 data.raw = NULL;
Markus Metzger5622f292009-09-15 13:00:23 +02001812 regs.ip = 0;
1813
1814 /*
1815 * Prepare a generic sample, i.e. fill in the invariant fields.
1816 * We will overwrite the from and to address before we output
1817 * the sample.
1818 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001819 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001820
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001821 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001822 header.size * (top - at), 1, 1))
1823 return;
1824
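	/*
	 * Emit one sample per BTS record: the branch source becomes the
	 * sample IP and the branch target is reported in data.addr.
	 */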
1825 for (; at < top; at++) {
1826 data.ip = at->from;
1827 data.addr = at->to;
1828
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001829 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001830 }
1831
Markus Metzger5622f292009-09-15 13:00:23 +02001832 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001833
1834 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001835 event->hw.interrupts++;
1836 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001837}
1838
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001839static void x86_pmu_disable(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001840{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001841 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1842 struct hw_perf_event *hwc = &event->hw;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001843 int i, idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001844
Robert Richter09534232009-04-29 12:47:16 +02001845 /*
1846 * Must be done before we disable, otherwise the nmi handler
1847 * could reenable again:
1848 */
Robert Richter43f62012009-04-29 16:55:56 +02001849 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001850 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001851
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001852 /*
1853 * Make sure the cleared pointer becomes visible before we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001854 * (potentially) free the event:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001855 */
Robert Richter527e26a2009-04-29 12:47:02 +02001856 barrier();
Ingo Molnar241771e2008-12-03 10:39:53 +01001857
Ingo Molnaree060942008-12-13 09:00:03 +01001858 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001859	 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001860 * that we are disabling:
1861 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001862 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001863
1864 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001865 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1866 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001867
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001868 cpuc->events[idx] = NULL;
Peter Zijlstra194002b2009-06-22 16:35:24 +02001869
Stephane Eranian1da53e02010-01-18 10:58:01 +02001870 for (i = 0; i < cpuc->n_events; i++) {
1871 if (event == cpuc->event_list[i]) {
1872
1873 if (x86_pmu.put_event_constraints)
1874 x86_pmu.put_event_constraints(cpuc, event);
1875
1876 while (++i < cpuc->n_events)
1877 cpuc->event_list[i-1] = cpuc->event_list[i];
1878
1879 --cpuc->n_events;
1880 }
1881 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001882 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001883}
1884
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001885/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001886 * Save and restart an expired event. Called by NMI contexts,
1887 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001888 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001889static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001890{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001891 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001892 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001893 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001894
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001895 x86_perf_event_update(event, hwc, idx);
1896 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001897
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001898 if (event->state == PERF_EVENT_STATE_ACTIVE)
1899 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001900
1901 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001902}
1903
Ingo Molnaraaba9802009-05-26 08:10:00 +02001904static void intel_pmu_reset(void)
1905{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001906 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001907 unsigned long flags;
1908 int idx;
1909
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02001911 return;
1912
1913 local_irq_save(flags);
1914
1915 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1916
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001917 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001918 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1919 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1920 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001921 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001922 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1923 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001924 if (ds)
1925 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001926
1927 local_irq_restore(flags);
1928}
1929
Vince Weaver11d15782009-07-08 17:46:14 -04001930static int p6_pmu_handle_irq(struct pt_regs *regs)
1931{
1932 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001933 struct cpu_hw_events *cpuc;
1934 struct perf_event *event;
1935 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001936 int idx, handled = 0;
1937 u64 val;
1938
Vince Weaver11d15782009-07-08 17:46:14 -04001939 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001940 data.raw = NULL;
Vince Weaver11d15782009-07-08 17:46:14 -04001941
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001942 cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001943
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001944 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Vince Weaver11d15782009-07-08 17:46:14 -04001945 if (!test_bit(idx, cpuc->active_mask))
1946 continue;
1947
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001948 event = cpuc->events[idx];
1949 hwc = &event->hw;
Vince Weaver11d15782009-07-08 17:46:14 -04001950
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001951 val = x86_perf_event_update(event, hwc, idx);
1952 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Vince Weaver11d15782009-07-08 17:46:14 -04001953 continue;
1954
1955 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001956 * event overflow
Vince Weaver11d15782009-07-08 17:46:14 -04001957 */
1958 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001959 data.period = event->hw.last_period;
Vince Weaver11d15782009-07-08 17:46:14 -04001960
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001961 if (!x86_perf_event_set_period(event, hwc, idx))
Vince Weaver11d15782009-07-08 17:46:14 -04001962 continue;
1963
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001964 if (perf_event_overflow(event, 1, &data, regs))
1965 p6_pmu_disable_event(hwc, idx);
Vince Weaver11d15782009-07-08 17:46:14 -04001966 }
1967
1968 if (handled)
1969 inc_irq_stat(apic_perf_irqs);
1970
1971 return handled;
1972}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001973
Ingo Molnar241771e2008-12-03 10:39:53 +01001974/*
1975 * This handler is triggered by the local APIC, so the APIC IRQ handling
1976 * rules apply:
1977 */
Yong Wanga3288102009-06-03 13:12:55 +08001978static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001979{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001980 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001981 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001982 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001983 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001984
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001985 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001986 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001987
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001988 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01001989
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001990 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02001991 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02001992 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001993 if (!status) {
1994 perf_enable();
1995 return 0;
1996 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001997
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001998 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001999again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002000 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002001 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2002 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02002003 intel_pmu_reset();
2004 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002005 return 1;
2006 }
2007
Mike Galbraithd278c482009-02-09 07:38:50 +01002008 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002009 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01002010 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002011 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01002012
2013 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02002014 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01002015 continue;
2016
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002017 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002018 continue;
2019
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002020 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02002021
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002022 if (perf_event_overflow(event, 1, &data, regs))
2023 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01002024 }
2025
Robert Richterdee5d902009-04-29 12:47:07 +02002026 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01002027
2028 /*
2029 * Repeat if there is more work to be done:
2030 */
Robert Richter19d84da2009-04-29 12:47:25 +02002031 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01002032 if (status)
2033 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002034
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002035 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002036
2037 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01002038}
2039
Yong Wanga3288102009-06-03 13:12:55 +08002040static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02002041{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002042 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002043 struct cpu_hw_events *cpuc;
2044 struct perf_event *event;
2045 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04002046 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002047 u64 val;
2048
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002049 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002050 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002051
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002052 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002053
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002054 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02002055 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02002056 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002057
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002058 event = cpuc->events[idx];
2059 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002060
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 val = x86_perf_event_update(event, hwc, idx);
2062 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002063 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002064
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002065 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002066 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002067 */
2068 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002069 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002070
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002071 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002072 continue;
2073
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002074 if (perf_event_overflow(event, 1, &data, regs))
2075 amd_pmu_disable_event(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002076 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002077
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002078 if (handled)
2079 inc_irq_stat(apic_perf_irqs);
2080
Robert Richtera29aa8a2009-04-29 12:47:21 +02002081 return handled;
2082}
Robert Richter39d81ea2009-04-29 12:47:05 +02002083
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002084void smp_perf_pending_interrupt(struct pt_regs *regs)
2085{
2086 irq_enter();
2087 ack_APIC_irq();
2088 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002089 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002090 irq_exit();
2091}
2092
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002093void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002094{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002095#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02002096 if (!x86_pmu.apic || !x86_pmu_initialized())
2097 return;
2098
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002099 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002100#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002101}
2102
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002103void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002104{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002105#ifdef CONFIG_X86_LOCAL_APIC
2106 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01002107 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02002108
Ingo Molnar241771e2008-12-03 10:39:53 +01002109 /*
Yong Wangc323d952009-05-29 13:28:35 +08002110 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01002111 */
Yong Wangc323d952009-05-29 13:28:35 +08002112 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002113#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01002114}
2115
2116static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002117perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01002118 unsigned long cmd, void *__args)
2119{
2120 struct die_args *args = __args;
2121 struct pt_regs *regs;
2122
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002123 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02002124 return NOTIFY_DONE;
2125
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002126 switch (cmd) {
2127 case DIE_NMI:
2128 case DIE_NMI_IPI:
2129 break;
2130
2131 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01002132 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002133 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002134
2135 regs = args->regs;
2136
Ingo Molnar04da8a42009-08-11 10:40:08 +02002137#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01002138 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002139#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002140 /*
2141 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002142 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002143 *
2144 * If the first NMI handles both, the latter will be empty and daze
2145 * the CPU.
2146 */
Yong Wanga3288102009-06-03 13:12:55 +08002147 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002148
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002149 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01002150}
2151
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002152static struct event_constraint bts_constraint =
2153 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002154
2155static int intel_special_constraints(struct perf_event *event,
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002156 unsigned long *idxmsk)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002157{
2158 unsigned int hw_event;
2159
2160 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2161
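	/*
	 * A branch-instructions event sampled with period 1 is exactly
	 * what the BTS facility provides, so constrain it to the BTS
	 * pseudo-counter instead of a generic counter.
	 */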
2162 if (unlikely((hw_event ==
2163 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2164 (event->hw.sample_period == 1))) {
2165
2166 bitmap_copy((unsigned long *)idxmsk,
2167 (unsigned long *)bts_constraint.idxmsk,
2168 X86_PMC_IDX_MAX);
2169 return 1;
2170 }
2171 return 0;
2172}
2173
2174static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2175 struct perf_event *event,
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002176 unsigned long *idxmsk)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002177{
2178 const struct event_constraint *c;
2179
2180 /*
2181 * cleanup bitmask
2182 */
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002183 bitmap_zero(idxmsk, X86_PMC_IDX_MAX);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002184
2185 if (intel_special_constraints(event, idxmsk))
2186 return;
2187
2188 if (x86_pmu.event_constraints) {
2189 for_each_event_constraint(c, x86_pmu.event_constraints) {
2190 if ((event->hw.config & c->cmask) == c->code) {
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002191 bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002192 return;
2193 }
2194 }
2195 }
2196 /* no constraints, means supports all generic counters */
2197 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2198}
2199
2200static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2201 struct perf_event *event,
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002202 unsigned long *idxmsk)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002203{
Stephane Eranian81130702010-01-21 17:39:01 +02002204 /* no constraints, means supports all generic counters */
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002205 bitmap_fill(idxmsk, x86_pmu.num_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002206}
2207
2208static int x86_event_sched_in(struct perf_event *event,
2209 struct perf_cpu_context *cpuctx, int cpu)
2210{
2211 int ret = 0;
2212
2213 event->state = PERF_EVENT_STATE_ACTIVE;
2214 event->oncpu = cpu;
2215 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2216
2217 if (!is_x86_event(event))
2218 ret = event->pmu->enable(event);
2219
2220 if (!ret && !is_software_event(event))
2221 cpuctx->active_oncpu++;
2222
2223 if (!ret && event->attr.exclusive)
2224 cpuctx->exclusive = 1;
2225
2226 return ret;
2227}
2228
2229static void x86_event_sched_out(struct perf_event *event,
2230 struct perf_cpu_context *cpuctx, int cpu)
2231{
2232 event->state = PERF_EVENT_STATE_INACTIVE;
2233 event->oncpu = -1;
2234
2235 if (!is_x86_event(event))
2236 event->pmu->disable(event);
2237
2238 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2239
2240 if (!is_software_event(event))
2241 cpuctx->active_oncpu--;
2242
2243 if (event->attr.exclusive || !cpuctx->active_oncpu)
2244 cpuctx->exclusive = 0;
2245}
2246
2247/*
2248 * Called to enable a whole group of events.
2249 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2250 * Assumes the caller has disabled interrupts and has
2251	 * frozen the PMU with hw_perf_disable().
2252 *
2253 * called with PMU disabled. If successful and return value 1,
2254 * then guaranteed to call perf_enable() and hw_perf_enable()
2255 */
2256int hw_perf_group_sched_in(struct perf_event *leader,
2257 struct perf_cpu_context *cpuctx,
2258 struct perf_event_context *ctx, int cpu)
2259{
2260 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2261 struct perf_event *sub;
2262 int assign[X86_PMC_IDX_MAX];
2263 int n0, n1, ret;
2264
2265 /* n0 = total number of events */
2266 n0 = collect_events(cpuc, leader, true);
2267 if (n0 < 0)
2268 return n0;
2269
2270 ret = x86_schedule_events(cpuc, n0, assign);
2271 if (ret)
2272 return ret;
2273
2274 ret = x86_event_sched_in(leader, cpuctx, cpu);
2275 if (ret)
2276 return ret;
2277
2278 n1 = 1;
2279 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02002280 if (sub->state > PERF_EVENT_STATE_OFF) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02002281 ret = x86_event_sched_in(sub, cpuctx, cpu);
2282 if (ret)
2283 goto undo;
2284 ++n1;
2285 }
2286 }
2287 /*
2288 * copy new assignment, now we know it is possible
2289 * will be used by hw_perf_enable()
2290 */
2291 memcpy(cpuc->assign, assign, n0*sizeof(int));
2292
2293 cpuc->n_events = n0;
2294 cpuc->n_added = n1;
2295 ctx->nr_active += n1;
2296
2297 /*
2298 * 1 means successful and events are active
2299 * This is not quite true because we defer
2300 * actual activation until hw_perf_enable() but
2301	 * this way we ensure the caller won't try to enable
2302 * individual events
2303 */
2304 return 1;
2305undo:
2306 x86_event_sched_out(leader, cpuctx, cpu);
2307 n0 = 1;
2308 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2309 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2310 x86_event_sched_out(sub, cpuctx, cpu);
2311 if (++n0 == n1)
2312 break;
2313 }
2314 }
2315 return ret;
2316}
2317
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002318static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2319 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01002320 .next = NULL,
2321 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01002322};
2323
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002324static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04002325 .name = "p6",
2326 .handle_irq = p6_pmu_handle_irq,
2327 .disable_all = p6_pmu_disable_all,
2328 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002329 .enable = p6_pmu_enable_event,
2330 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04002331 .eventsel = MSR_P6_EVNTSEL0,
2332 .perfctr = MSR_P6_PERFCTR0,
2333 .event_map = p6_pmu_event_map,
2334 .raw_event = p6_pmu_raw_event,
2335 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002336 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04002337 .max_period = (1ULL << 31) - 1,
2338 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002339 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04002340 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002341 * Events have 40 bits implemented. However they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04002342 * that bits [32-39] are sign extensions of bit 31. As such the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002343 * effective width of a event for P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04002344 *
2345 * See IA-32 Intel Architecture Software developer manual Vol 3B
2346 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002347 .event_bits = 32,
2348 .event_mask = (1ULL << 32) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002349 .get_event_constraints = intel_get_event_constraints,
2350 .event_constraints = intel_p6_event_constraints
Vince Weaver11d15782009-07-08 17:46:14 -04002351};
2352
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002353static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002354 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02002355 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002356 .disable_all = intel_pmu_disable_all,
2357 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002358 .enable = intel_pmu_enable_event,
2359 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302360 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2361 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002362 .event_map = intel_pmu_event_map,
2363 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302364 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002365 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002366 /*
2367 * Intel PMCs cannot be accessed sanely above 32 bit width,
2368 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002369 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002370 */
2371 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002372 .enable_bts = intel_pmu_enable_bts,
2373 .disable_bts = intel_pmu_disable_bts,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002374 .get_event_constraints = intel_get_event_constraints
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302375};
2376
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002377static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002378 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02002379 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002380 .disable_all = amd_pmu_disable_all,
2381 .enable_all = amd_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002382 .enable = amd_pmu_enable_event,
2383 .disable = amd_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302384 .eventsel = MSR_K7_EVNTSEL0,
2385 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002386 .event_map = amd_pmu_event_map,
2387 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302388 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002389 .num_events = 4,
2390 .event_bits = 48,
2391 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002392 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002393 /* use highest bit to detect overflow */
2394 .max_period = (1ULL << 47) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002395 .get_event_constraints = amd_get_event_constraints
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302396};
2397
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002398static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002399{
Vince Weaver11d15782009-07-08 17:46:14 -04002400 switch (boot_cpu_data.x86_model) {
2401 case 1:
2402 case 3: /* Pentium Pro */
2403 case 5:
2404 case 6: /* Pentium II */
2405 case 7:
2406 case 8:
2407 case 11: /* Pentium III */
Vince Weaver11d15782009-07-08 17:46:14 -04002408 case 9:
2409 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002410 /* Pentium M */
2411 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002412 default:
2413 pr_cont("unsupported p6 CPU model %d ",
2414 boot_cpu_data.x86_model);
2415 return -ENODEV;
2416 }
2417
Ingo Molnar04da8a42009-08-11 10:40:08 +02002418 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002419
Vince Weaver11d15782009-07-08 17:46:14 -04002420 return 0;
2421}
2422
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002423static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002424{
Ingo Molnar703e9372008-12-17 10:51:15 +01002425 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002426 union cpuid10_eax eax;
2427 unsigned int unused;
2428 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002429 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002430
Vince Weaver11d15782009-07-08 17:46:14 -04002431 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2432 /* check for P6 processor family */
2433 if (boot_cpu_data.x86 == 6) {
2434 return p6_pmu_init();
2435 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002436 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002437 }
2438 }
Robert Richterda1a7762009-04-29 12:46:58 +02002439
Ingo Molnar241771e2008-12-03 10:39:53 +01002440 /*
2441 * Check whether the Architectural PerfMon supports
Ingo Molnardfc65092009-09-21 11:31:35 +02002442 * Branch Misses Retired hw_event or not.
Ingo Molnar241771e2008-12-03 10:39:53 +01002443 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002444 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002445 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002446 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002447
Robert Richterfaa28ae2009-04-29 12:47:13 +02002448 version = eax.split.version_id;
2449 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02002450 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002451
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002452 x86_pmu = intel_pmu;
2453 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002454 x86_pmu.num_events = eax.split.num_events;
2455 x86_pmu.event_bits = eax.split.bit_width;
2456 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002457
2458 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002459 * Quirk: v2 perfmon does not report fixed-purpose events, so
2460 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002461 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002462 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302463
Ingo Molnar8326f442009-06-05 20:22:46 +02002464 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002465 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002466 */
2467 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08002468 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2469 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2470 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2471 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002472 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002473 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002474
Stephane Eranian1da53e02010-01-18 10:58:01 +02002475 x86_pmu.event_constraints = intel_core_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002476 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002477 break;
Ingo Molnar8326f442009-06-05 20:22:46 +02002478 case 26:
2479 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002480 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002481
Stephane Eranian1da53e02010-01-18 10:58:01 +02002482 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002483 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002484 break;
2485 case 28:
2486 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002487 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002488
Stephane Eranian1da53e02010-01-18 10:58:01 +02002489 x86_pmu.event_constraints = intel_gen_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002490 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002491 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002492 default:
2493 /*
2494 * default constraints for v2 and up
2495 */
2496 x86_pmu.event_constraints = intel_gen_event_constraints;
2497 pr_cont("generic architected perfmon, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002498 }
Robert Richter72eae042009-04-29 12:47:10 +02002499 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302500}
2501
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002502static __init int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302503{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05302504 /* Performance-monitoring supported from K7 and later: */
2505 if (boot_cpu_data.x86 < 6)
2506 return -ENODEV;
2507
Robert Richter4a06bd82009-04-29 12:47:11 +02002508 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002509
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05302510 /* Events are common for all AMDs */
2511 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2512 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002513
Robert Richter72eae042009-04-29 12:47:10 +02002514 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302515}
2516
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002517static void __init pmu_check_apic(void)
2518{
2519 if (cpu_has_apic)
2520 return;
2521
2522 x86_pmu.apic = 0;
2523 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2524 pr_info("no hardware sampling interrupt available.\n");
2525}
2526
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002527void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302528{
Robert Richter72eae042009-04-29 12:47:10 +02002529 int err;
2530
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002531 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002532
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302533 switch (boot_cpu_data.x86_vendor) {
2534 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02002535 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302536 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302537 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02002538 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302539 break;
Robert Richter41389602009-04-29 12:47:00 +02002540 default:
2541 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302542 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002543 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002544 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302545 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002546 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302547
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002548 pmu_check_apic();
2549
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002550 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02002551
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002552 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2553 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2554 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2555 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01002556 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002557 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2558 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01002559
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002560 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2561 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2562 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2563 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01002564 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002565
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002566 perf_event_mask |=
2567 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2568 x86_pmu.intel_ctrl = perf_event_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01002569
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002570 perf_events_lapic_init();
2571 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002572
Ingo Molnar57c0c152009-09-21 12:20:38 +02002573 pr_info("... version: %d\n", x86_pmu.version);
2574 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2575 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2576 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2577 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2578 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2579 pr_info("... event mask: %016Lx\n", perf_event_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01002580}
Ingo Molnar621a01e2008-12-11 12:46:46 +01002581
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002582static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01002583{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002584 x86_perf_event_update(event, &event->hw, event->hw.idx);
Ingo Molnaree060942008-12-13 09:00:03 +01002585}
2586
Robert Richter4aeb0b42009-04-29 12:47:03 +02002587static const struct pmu pmu = {
2588 .enable = x86_pmu_enable,
2589 .disable = x86_pmu_disable,
2590 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02002591 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01002592};
2593
Stephane Eranian1da53e02010-01-18 10:58:01 +02002594/*
2595 * validate a single event group
2596 *
2597 * validation includes:
2598 * - check events are compatible with each other
2599 * - events do not compete for the same counter
2600 * - number of events <= number of counters
2601 *
2602 * validation ensures the group can be loaded onto the
2603 * PMU if it was the only group available.
2604 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002605static int validate_group(struct perf_event *event)
2606{
Stephane Eranian1da53e02010-01-18 10:58:01 +02002607 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01002608 struct cpu_hw_events *fake_cpuc;
2609 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002610
Peter Zijlstra502568d2010-01-22 14:35:46 +01002611 ret = -ENOMEM;
2612 fake_cpuc = kzalloc(sizeof(*fake_cpuc), GFP_KERNEL);
2613 if (!fake_cpuc)
2614 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002615
Stephane Eranian1da53e02010-01-18 10:58:01 +02002616 /*
2617 * the event is not yet connected with its
2618 * siblings, therefore we must first collect
2619 * existing siblings, then add the new event
2620 * before we can simulate the scheduling
2621 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01002622 ret = -ENOSPC;
2623 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002624 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01002625 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002626
Peter Zijlstra502568d2010-01-22 14:35:46 +01002627 fake_cpuc->n_events = n;
2628 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002629 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01002630 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002631
Peter Zijlstra502568d2010-01-22 14:35:46 +01002632 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002633
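	/*
	 * Dry-run the scheduler on the fake cpuc: with a NULL
	 * assignment array this only checks whether all n events could
	 * be mapped onto counters, without committing any state.
	 */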
Peter Zijlstra502568d2010-01-22 14:35:46 +01002634 ret = x86_schedule_events(fake_cpuc, n, NULL);
2635
2636out_free:
2637 kfree(fake_cpuc);
2638out:
2639 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002640}
2641
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002642const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01002643{
Stephane Eranian81130702010-01-21 17:39:01 +02002644 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002645 int err;
2646
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002647 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002648 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02002649 /*
2650 * we temporarily connect the event to its pmu
2651 * such that validate_group() can classify
2652 * it as an x86 event using is_x86_event()
2653 */
2654 tmp = event->pmu;
2655 event->pmu = &pmu;
2656
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002657 if (event->group_leader != event)
2658 err = validate_group(event);
Stephane Eranian81130702010-01-21 17:39:01 +02002659
2660 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002661 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002662 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002663 if (event->destroy)
2664 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02002665 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002666 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002667
Robert Richter4aeb0b42009-04-29 12:47:03 +02002668 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002669}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002670
2671/*
2672 * callchain support
2673 */
2674
2675static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002676void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002677{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002678 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002679 entry->ip[entry->nr++] = ip;
2680}
2681
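/*
 * One callchain scratch buffer per cpu for each of IRQ and NMI
 * context, so an NMI arriving while an IRQ-context callchain is
 * being built does not overwrite it.
 */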
Tejun Heo245b2e72009-06-24 15:13:48 +09002682static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2683static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002684
2685
2686static void
2687backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2688{
2689 /* Ignore warnings */
2690}
2691
2692static void backtrace_warning(void *data, char *msg)
2693{
2694 /* Ignore warnings */
2695}
2696
2697static int backtrace_stack(void *data, char *name)
2698{
Ingo Molnar038e8362009-06-15 09:57:59 +02002699 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002700}
2701
2702static void backtrace_address(void *data, unsigned long addr, int reliable)
2703{
2704 struct perf_callchain_entry *entry = data;
2705
2706 if (reliable)
2707 callchain_store(entry, addr);
2708}
2709
2710static const struct stacktrace_ops backtrace_ops = {
2711 .warning = backtrace_warning,
2712 .warning_symbol = backtrace_warning_symbol,
2713 .stack = backtrace_stack,
2714 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01002715 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002716};
2717
Ingo Molnar038e8362009-06-15 09:57:59 +02002718#include "../dumpstack.h"
2719
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002720static void
2721perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2722{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002723 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02002724 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002725
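	/*
	 * Let the generic stack walker do the rest: backtrace_ops
	 * feeds every reliable return address it finds into
	 * callchain_store() via backtrace_address().
	 */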
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01002726 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002727}
2728
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002729/*
2730 * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
2731 */
2732static unsigned long
2733copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002734{
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002735 unsigned long offset, addr = (unsigned long)from;
2736 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2737 unsigned long size, len = 0;
2738 struct page *page;
2739 void *map;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002740 int ret;
2741
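	/*
	 * Copy the range page by page: pin each user page with the
	 * atomic-context GUP fast path, map it with kmap_atomic() and
	 * memcpy from there.  Stop early if a page cannot be pinned.
	 */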
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002742 do {
2743 ret = __get_user_pages_fast(addr, 1, 0, &page);
2744 if (!ret)
2745 break;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002746
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002747 offset = addr & (PAGE_SIZE - 1);
2748 size = min(PAGE_SIZE - offset, n - len);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002749
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002750 map = kmap_atomic(page, type);
2751 memcpy(to, map+offset, size);
2752 kunmap_atomic(map, type);
2753 put_page(page);
2754
2755 len += size;
2756 to += size;
2757 addr += size;
2758
2759 } while (len < n);
2760
2761 return len;
2762}
2763
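/*
 * Fetch one user stack frame (saved frame pointer plus return address);
 * returns true only if the whole frame could be copied.
 */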
2764static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2765{
2766 unsigned long bytes;
2767
2768 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2769
2770 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002771}
2772
2773static void
2774perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2775{
2776 struct stack_frame frame;
2777 const void __user *fp;
2778
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002779 if (!user_mode(regs))
2780 regs = task_pt_regs(current);
2781
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002782 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002783
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002784 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002785 callchain_store(entry, regs->ip);
2786
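	/*
	 * Walk the user stack by following frame pointers.  Give up on
	 * a failed copy or when the frame pointer drops below the
	 * current stack pointer, which indicates a corrupt chain.
	 */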
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002787 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02002788 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002789 frame.return_address = 0;
2790
2791 if (!copy_stack_frame(fp, &frame))
2792 break;
2793
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002794 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002795 break;
2796
2797 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02002798 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002799 }
2800}
2801
2802static void
2803perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2804{
2805 int is_user;
2806
2807 if (!regs)
2808 return;
2809
2810 is_user = user_mode(regs);
2811
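	/* Don't walk a user stack when the task is not currently running. */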
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002812 if (is_user && current->state != TASK_RUNNING)
2813 return;
2814
2815 if (!is_user)
2816 perf_callchain_kernel(regs, entry);
2817
2818 if (current->mm)
2819 perf_callchain_user(regs, entry);
2820}
2821
2822struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2823{
2824 struct perf_callchain_entry *entry;
2825
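	/* Pick the per-cpu scratch entry matching the current context. */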
2826 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09002827 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002828 else
Tejun Heo245b2e72009-06-24 15:13:48 +09002829 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002830
2831 entry->nr = 0;
2832
2833 perf_do_callchain(regs, entry);
2834
2835 return entry;
2836}
Markus Metzger30dd5682009-07-21 15:56:48 +02002837
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002838void hw_perf_event_setup_online(int cpu)
Markus Metzger30dd5682009-07-21 15:56:48 +02002839{
2840 init_debug_store_on_cpu(cpu);
2841}