/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)

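/*
 * Illustration (assumed layout, not taken from this file): each 64-bit
 * BTS record is believed to be three u64 fields -- branch-from address,
 * branch-to address, and a flags word -- which is where the 24-byte
 * BTS_RECORD_SIZE above comes from.
 */
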
/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

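/*
 * One debug_store is allocated per CPU (see reserve_bts_hardware() below);
 * init_debug_store_on_cpu() points the MSR_IA32_DS_AREA MSR at it so the
 * hardware knows where to write BTS records.
 */
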
struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;
};

struct event_constraint {
	unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int		code;
};

#define EVENT_CONSTRAINT(c, m)	{ .code = (c), .idxmsk[0] = (m) }
#define EVENT_CONSTRAINT_END	{ .code = 0, .idxmsk[0] = 0 }

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->idxmsk[0]; (e)++)

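/*
 * Reading a constraint table (illustrative): EVENT_CONSTRAINT(0x12, 0x2)
 * says event code 0x12 may only be scheduled on the counters named in the
 * 0x2 index bitmask, i.e. generic counter 1. Each table ends with
 * EVENT_CONSTRAINT_END, whose empty mask terminates
 * for_each_event_constraint().
 */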

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
};

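/*
 * The model-specific init code picks one implementation of each of these
 * hooks (the p6/intel/amd routines in this file) and installs it here;
 * the generic code below only ever calls through x86_pmu.
 */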
static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static const struct event_constraint *event_constraints;

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * An event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with a 0 MESI unit mask: with no cache state selected in the
 * unit mask, the event never counts.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}
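
/*
 * Note: the EVNTSEL masks above (and their Core/K7 counterparts below)
 * omit the USR, OS, INT and ENABLE bits, so a raw config supplied by
 * user space cannot override the privilege filtering and enable bits
 * that __hw_perf_event_init() manages itself.
 */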
173
Stephane Eranianb6900812009-10-06 16:42:09 +0200174static const struct event_constraint intel_p6_event_constraints[] =
175{
176 EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
177 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
178 EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
179 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
180 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
181 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
182 EVENT_CONSTRAINT_END
183};
Vince Weaver11d15782009-07-08 17:46:14 -0400184
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static const struct event_constraint intel_core_event_constraints[] =
{
	EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT(0x18, 0x1),	/* IDLE_DURING_DIV */
	EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	EVENT_CONSTRAINT(0xa1, 0x1),	/* RS_UOPS_DISPATCH_CYCLES */
	EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static const struct event_constraint intel_nehalem_event_constraints[] =
{
	EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD */
	EVENT_CONSTRAINT(0x41, 0x3),	/* L1D_CACHE_ST */
	EVENT_CONSTRAINT(0x42, 0x3),	/* L1D_CACHE_LOCK */
	EVENT_CONSTRAINT(0x43, 0x3),	/* L1D_ALL_REF */
	EVENT_CONSTRAINT(0x4e, 0x3),	/* L1D_PREFETCH */
	EVENT_CONSTRAINT(0x4c, 0x3),	/* LOAD_HIT_PRE */
	EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	EVENT_CONSTRAINT(0x52, 0x3),	/* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0x53, 0x3),	/* L1D_CACHE_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0xc5, 0x3),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

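/*
 * These tables are indexed by the three bytes of a PERF_TYPE_HW_CACHE
 * config word, decoded in set_ext_hw_attr() below:
 *
 *	cache_type   = (config >>  0) & 0xff	(L1D, L1I, LL, DTLB, ITLB, BPU)
 *	cache_op     = (config >>  8) & 0xff	(READ, WRITE, PREFETCH)
 *	cache_result = (config >> 16) & 0xff	(ACCESS, MISS)
 */
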
static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_REG_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses :IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the new raw count.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

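/*
 * Worked example of the sign-extension trick above (counter width assumed
 * for illustration): with 40-bit counters, event_bits = 40 and shift = 24.
 * Shifting both raw counts left by 24 puts the counter's top bit into
 * bit 63, so the subtraction followed by the arithmetic shift right by 24
 * yields a correctly signed delta even when the counter wrapped.
 */
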
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * The raw hw_event type provides the config directly in the
	 * hw_event structure:
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}
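
/*
 * On P6 the ENABLE bit in EVNTSEL0 gates both counters (see the "p6 only
 * has one enable register" note in p6_pmu_disable_all() above), so a
 * single event is "disabled" by reprogramming its counter to the
 * non-counting P6_NOP_EVENT rather than by clearing the enable bit.
 */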

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}

static inline void
amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	x86_pmu_disable_event(hwc, idx);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event,
			  struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			      (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}
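
/*
 * The (u64)-left programming above exploits that the counter counts up:
 * starting it at -left (truncated to event_mask bits) makes it overflow
 * and raise its interrupt after exactly 'left' increments.
 */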
1328
1329static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001330intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001331{
1332 int idx = __idx - X86_PMC_IDX_FIXED;
1333 u64 ctrl_val, bits, mask;
1334 int err;
1335
1336 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001337 * Enable IRQ generation (0x8),
1338 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1339 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001340 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001341 bits = 0x8ULL;
1342 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1343 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001344 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1345 bits |= 0x1;
1346 bits <<= (idx * 4);
1347 mask = 0xfULL << (idx * 4);
1348
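	/*
	 * Each fixed counter owns a 4-bit field in the fixed-counter
	 * control MSR (e.g. idx 1 uses bits 4-7), so the
	 * read-modify-write below only touches this counter's field.
	 */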
1349 rdmsrl(hwc->config_base, ctrl_val);
1350 ctrl_val &= ~mask;
1351 ctrl_val |= bits;
1352 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001353}
1354
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001355static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001356{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001357 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001358 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001359
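	/*
	 * On P6-family PMUs the single ENABLE bit in EVNTSEL0 gates
	 * all counters, so the per-event config carries it only while
	 * the PMU as a whole is enabled.
	 */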
Peter Zijlstra984b8382009-07-10 09:59:56 +02001360 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001361 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001362 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1363
1364 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001365}
1366
1367
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001368static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001369{
Markus Metzger30dd5682009-07-21 15:56:48 +02001370 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001371 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001372 return;
1373
1374 intel_pmu_enable_bts(hwc->config);
1375 return;
1376 }
1377
Robert Richter7c90cc42009-04-29 12:47:18 +02001378 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1379 intel_pmu_enable_fixed(hwc, idx);
1380 return;
1381 }
1382
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001383 x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001384}
1385
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001386static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001387{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001388 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001389
1390 if (cpuc->enabled)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001391 x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001392}
1393
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001394static int fixed_mode_idx(struct hw_perf_event *hwc)
Ingo Molnar862a1a52008-12-17 13:09:20 +01001395{
Ingo Molnardfc65092009-09-21 11:31:35 +02001396 unsigned int hw_event;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001397
Ingo Molnardfc65092009-09-21 11:31:35 +02001398 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
Markus Metzger30dd5682009-07-21 15:56:48 +02001399
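	/*
	 * A branch-instructions event with sample_period == 1 is
	 * mapped onto the BTS (Branch Trace Store) pseudo counter,
	 * which records every branch into a memory buffer instead of
	 * sampling via counter overflow.
	 */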
Ingo Molnardfc65092009-09-21 11:31:35 +02001400 if (unlikely((hw_event ==
Markus Metzger30dd5682009-07-21 15:56:48 +02001401 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1402 (hwc->sample_period == 1)))
1403 return X86_PMC_IDX_FIXED_BTS;
1404
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001405 if (!x86_pmu.num_events_fixed)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301406 return -1;
1407
Stephane Eranian04a705df2009-10-06 16:42:08 +02001408 /*
1409 * fixed counters do not take all possible filters
1410 */
1411 if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
1412 return -1;
1413
Ingo Molnardfc65092009-09-21 11:31:35 +02001414 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001415 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
Ingo Molnardfc65092009-09-21 11:31:35 +02001416 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001417 return X86_PMC_IDX_FIXED_CPU_CYCLES;
Ingo Molnardfc65092009-09-21 11:31:35 +02001418 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001419 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1420
Ingo Molnar862a1a52008-12-17 13:09:20 +01001421 return -1;
1422}
1423
Ingo Molnaree060942008-12-13 09:00:03 +01001424/*
Stephane Eranianb6900812009-10-06 16:42:09 +02001425 * generic counter allocator: get next free counter
1426 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001427static int
1428gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
Stephane Eranianb6900812009-10-06 16:42:09 +02001429{
Stephane Eranianb6900812009-10-06 16:42:09 +02001430 int idx;
1431
1432 idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
1433 return idx == x86_pmu.num_events ? -1 : idx;
1434}
1435
1436/*
1437 * intel-specific counter allocator: check event constraints
1438 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001439static int
1440intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
Stephane Eranianb6900812009-10-06 16:42:09 +02001441{
Stephane Eranianb6900812009-10-06 16:42:09 +02001442 const struct event_constraint *event_constraint;
1443 int i, code;
1444
Ingo Molnar7a693d32009-10-13 08:16:30 +02001445 if (!event_constraints)
Stephane Eranianb6900812009-10-06 16:42:09 +02001446 goto skip;
1447
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001448 code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
Stephane Eranianb6900812009-10-06 16:42:09 +02001449
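	/*
	 * A constrained event may only run on the counters named in
	 * its idxmsk; claim the first free one of those, and fail
	 * rather than fall back to an arbitrary generic counter.
	 */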
Ingo Molnar7a693d32009-10-13 08:16:30 +02001450 for_each_event_constraint(event_constraint, event_constraints) {
Stephane Eranianb6900812009-10-06 16:42:09 +02001451 if (code == event_constraint->code) {
1452 for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
1453 if (!test_and_set_bit(i, cpuc->used_mask))
1454 return i;
1455 }
1456 return -1;
1457 }
1458 }
1459skip:
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001460 return gen_get_event_idx(cpuc, hwc);
Stephane Eranianb6900812009-10-06 16:42:09 +02001461}
1462
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001463static int
1464x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
Ingo Molnar241771e2008-12-03 10:39:53 +01001465{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001466 int idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001467
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001468 idx = fixed_mode_idx(hwc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001469 if (idx == X86_PMC_IDX_FIXED_BTS) {
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +02001470 /* BTS is already occupied. */
Markus Metzger30dd5682009-07-21 15:56:48 +02001471 if (test_and_set_bit(idx, cpuc->used_mask))
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +02001472 return -EAGAIN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001473
1474 hwc->config_base = 0;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001475 hwc->event_base = 0;
Markus Metzger30dd5682009-07-21 15:56:48 +02001476 hwc->idx = idx;
1477 } else if (idx >= 0) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001478 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001479	 * Try to get the fixed event; if that is already taken,
1480	 * then try to get a generic event:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001481 */
Robert Richter43f62012009-04-29 16:55:56 +02001482 if (test_and_set_bit(idx, cpuc->used_mask))
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001483 goto try_generic;
Ingo Molnar0dff86a2008-12-23 12:28:12 +01001484
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001485 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1486 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001487 * We set it so that event_base + idx in wrmsr/rdmsr maps to
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001488 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1489 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001490 hwc->event_base =
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001491 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
Ingo Molnar241771e2008-12-03 10:39:53 +01001492 hwc->idx = idx;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001493 } else {
1494 idx = hwc->idx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001495 /* Try to get the previous generic event again */
Stephane Eranianb6900812009-10-06 16:42:09 +02001496 if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001497try_generic:
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001498 idx = x86_pmu.get_event_idx(cpuc, hwc);
Stephane Eranianb6900812009-10-06 16:42:09 +02001499 if (idx == -1)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001500 return -EAGAIN;
1501
Robert Richter43f62012009-04-29 16:55:56 +02001502 set_bit(idx, cpuc->used_mask);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001503 hwc->idx = idx;
1504 }
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001505 hwc->config_base = x86_pmu.eventsel;
1506 hwc->event_base = x86_pmu.perfctr;
Ingo Molnar241771e2008-12-03 10:39:53 +01001507 }
1508
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001509 return idx;
1510}
1511
1512/*
1513 * Find a PMC slot for the freshly enabled / scheduled in event:
1514 */
1515static int x86_pmu_enable(struct perf_event *event)
1516{
1517 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1518 struct hw_perf_event *hwc = &event->hw;
1519 int idx;
1520
1521 idx = x86_schedule_event(cpuc, hwc);
1522 if (idx < 0)
1523 return idx;
1524
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001525 perf_events_lapic_init();
Ingo Molnar53b441a2009-05-25 21:41:28 +02001526
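	/*
	 * Program the event with its counter stopped: disable it,
	 * publish it in cpuc, set the period, and only then enable,
	 * so the NMI handler never sees a half-initialized event.
	 */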
Robert Richterd4369892009-04-29 12:47:19 +02001527 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001528
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001529 cpuc->events[idx] = event;
Robert Richter43f62012009-04-29 16:55:56 +02001530 set_bit(idx, cpuc->active_mask);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001531
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001532 x86_perf_event_set_period(event, hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001533 x86_pmu.enable(hwc, idx);
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001534
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001535 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001536
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001537 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001538}
1539
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001540static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001541{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001542 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1543 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001544
1545 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001546 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001547 return;
1548
1549 x86_pmu.enable(hwc, hwc->idx);
1550}
1551
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001552void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001553{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001554 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001555 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001556 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001557 int cpu, idx;
1558
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001559 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001560 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001561
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001562 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001563
1564 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001565 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001566
Robert Richterfaa28ae2009-04-29 12:47:13 +02001567 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301568 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1569 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1570 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1571 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001572
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301573 pr_info("\n");
1574 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1575 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1576 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1577 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301578 }
Robert Richter43f62012009-04-29 16:55:56 +02001579 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001580
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001581 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001582 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1583 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001584
Tejun Heo245b2e72009-06-24 15:13:48 +09001585 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001586
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301587 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001588 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301589 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001590 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301591 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001592 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001593 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001594 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001595 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1596
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301597 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001598 cpu, idx, pmc_count);
1599 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001600 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001601}
1602
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001603static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001604{
1605 struct debug_store *ds = cpuc->ds;
1606 struct bts_record {
1607 u64 from;
1608 u64 to;
1609 u64 flags;
1610 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001611 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001612 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001613 struct perf_output_handle handle;
1614 struct perf_event_header header;
1615 struct perf_sample_data data;
1616 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001617
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001618 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001619 return;
1620
1621 if (!ds)
1622 return;
1623
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001624 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1625 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001626
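	/*
	 * Each 24-byte BTS record holds a from/to branch-address pair
	 * plus flags; everything between the buffer base and the
	 * current index is valid, not-yet-drained data.
	 */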
Markus Metzger5622f292009-09-15 13:00:23 +02001627 if (top <= at)
1628 return;
1629
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001630 ds->bts_index = ds->bts_buffer_base;
1631
Markus Metzger30dd5682009-07-21 15:56:48 +02001632
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001633 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001634 data.addr = 0;
1635 regs.ip = 0;
1636
1637 /*
1638 * Prepare a generic sample, i.e. fill in the invariant fields.
1639 * We will overwrite the from and to address before we output
1640 * the sample.
1641 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001642 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001643
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001644 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001645 header.size * (top - at), 1, 1))
1646 return;
1647
1648 for (; at < top; at++) {
1649 data.ip = at->from;
1650 data.addr = at->to;
1651
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001652 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001653 }
1654
Markus Metzger5622f292009-09-15 13:00:23 +02001655 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001656
1657 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001658 event->hw.interrupts++;
1659 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001660}
1661
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001662static void x86_pmu_disable(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001663{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001664 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1665 struct hw_perf_event *hwc = &event->hw;
Robert Richter6f00cad2009-04-29 12:47:17 +02001666 int idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001667
Robert Richter09534232009-04-29 12:47:16 +02001668 /*
1669	 * Must be done before we disable, otherwise the NMI handler
1670	 * could re-enable it again:
1671 */
Robert Richter43f62012009-04-29 16:55:56 +02001672 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001673 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001674
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001675 /*
1676 * Make sure the cleared pointer becomes visible before we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001677 * (potentially) free the event:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001678 */
Robert Richter527e26a2009-04-29 12:47:02 +02001679 barrier();
Ingo Molnar241771e2008-12-03 10:39:53 +01001680
Ingo Molnaree060942008-12-13 09:00:03 +01001681 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001682	 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001683 * that we are disabling:
1684 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001685 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001686
1687 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001688 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1689 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001690
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001691 cpuc->events[idx] = NULL;
Robert Richter43f62012009-04-29 16:55:56 +02001692 clear_bit(idx, cpuc->used_mask);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001693
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001694 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001695}
1696
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001697/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001698 * Save and restart an expired event. Called by NMI contexts,
1699 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001700 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001701static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001702{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001703 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001704 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001705 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001706
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001707 x86_perf_event_update(event, hwc, idx);
1708 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001709
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001710 if (event->state == PERF_EVENT_STATE_ACTIVE)
1711 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001712
1713 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001714}
1715
Ingo Molnaraaba9802009-05-26 08:10:00 +02001716static void intel_pmu_reset(void)
1717{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001718 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001719 unsigned long flags;
1720 int idx;
1721
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001722 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02001723 return;
1724
1725 local_irq_save(flags);
1726
1727 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1728
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001729 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001730 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1731 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1732 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001733 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001734 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1735 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001736 if (ds)
1737 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001738
1739 local_irq_restore(flags);
1740}
1741
Vince Weaver11d15782009-07-08 17:46:14 -04001742static int p6_pmu_handle_irq(struct pt_regs *regs)
1743{
1744 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001745 struct cpu_hw_events *cpuc;
1746 struct perf_event *event;
1747 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001748 int idx, handled = 0;
1749 u64 val;
1750
Vince Weaver11d15782009-07-08 17:46:14 -04001751 data.addr = 0;
1752
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001753 cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001754
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001755 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Vince Weaver11d15782009-07-08 17:46:14 -04001756 if (!test_bit(idx, cpuc->active_mask))
1757 continue;
1758
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001759 event = cpuc->events[idx];
1760 hwc = &event->hw;
Vince Weaver11d15782009-07-08 17:46:14 -04001761
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001762 val = x86_perf_event_update(event, hwc, idx);
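		/*
		 * Counters are programmed negative; while the sign bit
		 * is still set the counter has not wrapped yet, i.e.
		 * this event did not overflow.
		 */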
1763 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Vince Weaver11d15782009-07-08 17:46:14 -04001764 continue;
1765
1766 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001767 * event overflow
Vince Weaver11d15782009-07-08 17:46:14 -04001768 */
1769 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001770 data.period = event->hw.last_period;
Vince Weaver11d15782009-07-08 17:46:14 -04001771
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001772 if (!x86_perf_event_set_period(event, hwc, idx))
Vince Weaver11d15782009-07-08 17:46:14 -04001773 continue;
1774
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001775 if (perf_event_overflow(event, 1, &data, regs))
1776 p6_pmu_disable_event(hwc, idx);
Vince Weaver11d15782009-07-08 17:46:14 -04001777 }
1778
1779 if (handled)
1780 inc_irq_stat(apic_perf_irqs);
1781
1782 return handled;
1783}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001784
Ingo Molnar241771e2008-12-03 10:39:53 +01001785/*
1786 * This handler is triggered by the local APIC, so the APIC IRQ handling
1787 * rules apply:
1788 */
Yong Wanga3288102009-06-03 13:12:55 +08001789static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001790{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001791 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001792 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001793 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001794 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001795
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001796 data.addr = 0;
1797
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001798 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01001799
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001800 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02001801 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02001802 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001803 if (!status) {
1804 perf_enable();
1805 return 0;
1806 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001807
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001808 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001809again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001810 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001811 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1812 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02001813 intel_pmu_reset();
1814 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001815 return 1;
1816 }
1817
Mike Galbraithd278c482009-02-09 07:38:50 +01001818 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001819 ack = status;
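	/*
	 * GLOBAL_STATUS has one bit per overflowed counter: handle
	 * each set bit, ack them all at once below, then re-read the
	 * status in case more counters overflowed meanwhile.
	 */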
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001820 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001821 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01001822
1823 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02001824 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01001825 continue;
1826
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001827 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001828 continue;
1829
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001830 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02001831
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001832 if (perf_event_overflow(event, 1, &data, regs))
1833 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01001834 }
1835
Robert Richterdee5d902009-04-29 12:47:07 +02001836 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01001837
1838 /*
1839 * Repeat if there is more work to be done:
1840 */
Robert Richter19d84da2009-04-29 12:47:25 +02001841 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01001842 if (status)
1843 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001844
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001845 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001846
1847 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01001848}
1849
Yong Wanga3288102009-06-03 13:12:55 +08001850static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001851{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001852 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001853 struct cpu_hw_events *cpuc;
1854 struct perf_event *event;
1855 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001856 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001857 u64 val;
1858
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001859 data.addr = 0;
1860
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001861 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001862
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001863 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02001864 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02001865 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001866
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001867 event = cpuc->events[idx];
1868 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001869
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001870 val = x86_perf_event_update(event, hwc, idx);
1871 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001872 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001873
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001874 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001875 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001876 */
1877 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001878 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001879
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001880 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001881 continue;
1882
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001883 if (perf_event_overflow(event, 1, &data, regs))
1884 amd_pmu_disable_event(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001885 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001886
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001887 if (handled)
1888 inc_irq_stat(apic_perf_irqs);
1889
Robert Richtera29aa8a2009-04-29 12:47:21 +02001890 return handled;
1891}
Robert Richter39d81ea2009-04-29 12:47:05 +02001892
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001893void smp_perf_pending_interrupt(struct pt_regs *regs)
1894{
1895 irq_enter();
1896 ack_APIC_irq();
1897 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001898 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001899 irq_exit();
1900}
1901
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001902void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001903{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001904#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02001905 if (!x86_pmu.apic || !x86_pmu_initialized())
1906 return;
1907
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001908 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001909#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001910}
1911
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001912void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001913{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001914#ifdef CONFIG_X86_LOCAL_APIC
1915 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001916 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001917
Ingo Molnar241771e2008-12-03 10:39:53 +01001918 /*
Yong Wangc323d952009-05-29 13:28:35 +08001919 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001920 */
Yong Wangc323d952009-05-29 13:28:35 +08001921 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001922#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01001923}
1924
1925static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001926perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001927 unsigned long cmd, void *__args)
1928{
1929 struct die_args *args = __args;
1930 struct pt_regs *regs;
1931
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001932 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001933 return NOTIFY_DONE;
1934
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001935 switch (cmd) {
1936 case DIE_NMI:
1937 case DIE_NMI_IPI:
1938 break;
1939
1940 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001941 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001942 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001943
1944 regs = args->regs;
1945
Ingo Molnar04da8a42009-08-11 10:40:08 +02001946#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01001947 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001948#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001949 /*
1950 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001951	 * events could trigger 'simultaneously', raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001952 *
1953 * If the first NMI handles both, the latter will be empty and daze
1954 * the CPU.
1955 */
Yong Wanga3288102009-06-03 13:12:55 +08001956 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001957
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001958 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001959}
1960
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001961static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1962 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01001963 .next = NULL,
1964 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01001965};
1966
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09001967static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04001968 .name = "p6",
1969 .handle_irq = p6_pmu_handle_irq,
1970 .disable_all = p6_pmu_disable_all,
1971 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001972 .enable = p6_pmu_enable_event,
1973 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04001974 .eventsel = MSR_P6_EVNTSEL0,
1975 .perfctr = MSR_P6_PERFCTR0,
1976 .event_map = p6_pmu_event_map,
1977 .raw_event = p6_pmu_raw_event,
1978 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02001979 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04001980 .max_period = (1ULL << 31) - 1,
1981 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001982 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04001983 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001984	 * Events have 40 bits implemented. However, they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04001985	 * that bits [32-39] are sign extensions of bit 31. As such the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001986	 * effective width of an event for a P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04001987 *
1988 * See IA-32 Intel Architecture Software developer manual Vol 3B
1989 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001990 .event_bits = 32,
1991 .event_mask = (1ULL << 32) - 1,
Stephane Eranianb6900812009-10-06 16:42:09 +02001992 .get_event_idx = intel_get_event_idx,
Vince Weaver11d15782009-07-08 17:46:14 -04001993};
1994
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09001995static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02001996 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02001997 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001998 .disable_all = intel_pmu_disable_all,
1999 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002000 .enable = intel_pmu_enable_event,
2001 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302002 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2003 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002004 .event_map = intel_pmu_event_map,
2005 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302006 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002007 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002008 /*
2009 * Intel PMCs cannot be accessed sanely above 32 bit width,
2010 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002011 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002012 */
2013 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002014 .enable_bts = intel_pmu_enable_bts,
2015 .disable_bts = intel_pmu_disable_bts,
Stephane Eranianb6900812009-10-06 16:42:09 +02002016 .get_event_idx = intel_get_event_idx,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302017};
2018
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002019static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002020 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02002021 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002022 .disable_all = amd_pmu_disable_all,
2023 .enable_all = amd_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002024 .enable = amd_pmu_enable_event,
2025 .disable = amd_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302026 .eventsel = MSR_K7_EVNTSEL0,
2027 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002028 .event_map = amd_pmu_event_map,
2029 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302030 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002031 .num_events = 4,
2032 .event_bits = 48,
2033 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002034 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002035 /* use highest bit to detect overflow */
2036 .max_period = (1ULL << 47) - 1,
Stephane Eranianb6900812009-10-06 16:42:09 +02002037 .get_event_idx = gen_get_event_idx,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302038};
2039
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002040static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002041{
Vince Weaver11d15782009-07-08 17:46:14 -04002042 switch (boot_cpu_data.x86_model) {
2043 case 1:
2044 case 3: /* Pentium Pro */
2045 case 5:
2046 case 6: /* Pentium II */
2047 case 7:
2048 case 8:
2049 case 11: /* Pentium III */
Ingo Molnar7a693d32009-10-13 08:16:30 +02002050 event_constraints = intel_p6_event_constraints;
Vince Weaver11d15782009-07-08 17:46:14 -04002051 break;
2052 case 9:
2053 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002054 /* Pentium M */
Ingo Molnar7a693d32009-10-13 08:16:30 +02002055 event_constraints = intel_p6_event_constraints;
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002056 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002057 default:
2058 pr_cont("unsupported p6 CPU model %d ",
2059 boot_cpu_data.x86_model);
2060 return -ENODEV;
2061 }
2062
Ingo Molnar04da8a42009-08-11 10:40:08 +02002063 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002064
Vince Weaver11d15782009-07-08 17:46:14 -04002065 if (!cpu_has_apic) {
Ingo Molnar3c581a72009-08-11 10:47:36 +02002066 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
Ingo Molnar04da8a42009-08-11 10:40:08 +02002067 pr_info("no hardware sampling interrupt available.\n");
2068 x86_pmu.apic = 0;
Vince Weaver11d15782009-07-08 17:46:14 -04002069 }
Vince Weaver11d15782009-07-08 17:46:14 -04002070
2071 return 0;
2072}
2073
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002074static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002075{
Ingo Molnar703e9372008-12-17 10:51:15 +01002076 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002077 union cpuid10_eax eax;
2078 unsigned int unused;
2079 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002080 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002081
Vince Weaver11d15782009-07-08 17:46:14 -04002082 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2083 /* check for P6 processor family */
2084 if (boot_cpu_data.x86 == 6) {
2085 return p6_pmu_init();
2086 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002087 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002088 }
2089 }
Robert Richterda1a7762009-04-29 12:46:58 +02002090
Ingo Molnar241771e2008-12-03 10:39:53 +01002091 /*
2092	 * Check whether the Architectural PerfMon supports the
Ingo Molnardfc65092009-09-21 11:31:35 +02002093	 * Branch Misses Retired hw_event:
Ingo Molnar241771e2008-12-03 10:39:53 +01002094 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002095 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002096 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002097 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002098
Robert Richterfaa28ae2009-04-29 12:47:13 +02002099 version = eax.split.version_id;
2100 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02002101 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002102
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002103 x86_pmu = intel_pmu;
2104 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002105 x86_pmu.num_events = eax.split.num_events;
2106 x86_pmu.event_bits = eax.split.bit_width;
2107 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002108
2109 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002110 * Quirk: v2 perfmon does not report fixed-purpose events, so
2111 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002112 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002113 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302114
Ingo Molnar8326f442009-06-05 20:22:46 +02002115 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002116 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002117 */
2118 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08002119 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2120 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2121 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2122 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002123 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002124 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002125
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002126 pr_cont("Core2 events, ");
Ingo Molnar7a693d32009-10-13 08:16:30 +02002127 event_constraints = intel_core_event_constraints;
Ingo Molnar8326f442009-06-05 20:22:46 +02002128 break;
2129 default:
2130 case 26:
2131 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002132 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002133
Ingo Molnar7a693d32009-10-13 08:16:30 +02002134 event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002135 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002136 break;
2137 case 28:
2138 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002139 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002140
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002141 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002142 break;
2143 }
Robert Richter72eae042009-04-29 12:47:10 +02002144 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302145}
2146
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002147static __init int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302148{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05302149 /* Performance-monitoring supported from K7 and later: */
2150 if (boot_cpu_data.x86 < 6)
2151 return -ENODEV;
2152
Robert Richter4a06bd82009-04-29 12:47:11 +02002153 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002154
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05302155 /* Events are common for all AMDs */
2156 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2157 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002158
Robert Richter72eae042009-04-29 12:47:10 +02002159 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302160}
2161
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002162void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302163{
Robert Richter72eae042009-04-29 12:47:10 +02002164 int err;
2165
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002166 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002167
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302168 switch (boot_cpu_data.x86_vendor) {
2169 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02002170 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302171 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302172 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02002173 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302174 break;
Robert Richter41389602009-04-29 12:47:00 +02002175 default:
2176 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302177 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002178 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002179 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302180 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002181 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302182
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002183 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02002184
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002185 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2186 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2187 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2188 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01002189 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002190 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2191 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01002192
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002193 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2194 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2195 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2196 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01002197 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002198
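	/*
	 * perf_event_mask ends up with one bit per usable counter:
	 * generic counters in the low bits, fixed-purpose counters
	 * starting at bit X86_PMC_IDX_FIXED.
	 */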
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002199 perf_event_mask |=
2200 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2201 x86_pmu.intel_ctrl = perf_event_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01002202
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002203 perf_events_lapic_init();
2204 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002205
Ingo Molnar57c0c152009-09-21 12:20:38 +02002206 pr_info("... version: %d\n", x86_pmu.version);
2207 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2208 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2209 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2210 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2211 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2212 pr_info("... event mask: %016Lx\n", perf_event_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01002213}
Ingo Molnar621a01e2008-12-11 12:46:46 +01002214
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002215static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01002216{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002217 x86_perf_event_update(event, &event->hw, event->hw.idx);
Ingo Molnaree060942008-12-13 09:00:03 +01002218}
2219
Robert Richter4aeb0b42009-04-29 12:47:03 +02002220static const struct pmu pmu = {
2221 .enable = x86_pmu_enable,
2222 .disable = x86_pmu_disable,
2223 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02002224 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01002225};
2226
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002227static int
2228validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
2229{
2230 struct hw_perf_event fake_event = event->hw;
2231
2232 if (event->pmu != &pmu)
2233 return 0;
2234
2235 return x86_schedule_event(cpuc, &fake_event);
2236}
2237
2238static int validate_group(struct perf_event *event)
2239{
2240 struct perf_event *sibling, *leader = event->group_leader;
2241 struct cpu_hw_events fake_pmu;
2242
2243 memset(&fake_pmu, 0, sizeof(fake_pmu));
2244
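	/*
	 * Dry run: schedule the leader and all siblings onto a zeroed
	 * dummy cpu_hw_events; the group is valid only if every one
	 * of them can claim a counter there.
	 */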
2245 if (!validate_event(&fake_pmu, leader))
2246 return -ENOSPC;
2247
2248 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
2249 if (!validate_event(&fake_pmu, sibling))
2250 return -ENOSPC;
2251 }
2252
2253 if (!validate_event(&fake_pmu, event))
2254 return -ENOSPC;
2255
2256 return 0;
2257}
2258
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002259const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01002260{
2261 int err;
2262
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002263 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002264 if (!err) {
2265 if (event->group_leader != event)
2266 err = validate_group(event);
2267 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002268 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002269 if (event->destroy)
2270 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02002271 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002272 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002273
Robert Richter4aeb0b42009-04-29 12:47:03 +02002274 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002275}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002276
2277/*
2278 * callchain support
2279 */
2280
2281static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002282void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002283{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002284 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002285 entry->ip[entry->nr++] = ip;
2286}
2287
Tejun Heo245b2e72009-06-24 15:13:48 +09002288static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2289static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02002290static DEFINE_PER_CPU(int, in_nmi_frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002291
2292
2293static void
2294backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2295{
2296 /* Ignore warnings */
2297}
2298
2299static void backtrace_warning(void *data, char *msg)
2300{
2301 /* Ignore warnings */
2302}
2303
2304static int backtrace_stack(void *data, char *name)
2305{
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02002306 per_cpu(in_nmi_frame, smp_processor_id()) =
2307 x86_is_stack_id(NMI_STACK, name);
2308
Ingo Molnar038e8362009-06-15 09:57:59 +02002309 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002310}
2311
2312static void backtrace_address(void *data, unsigned long addr, int reliable)
2313{
2314 struct perf_callchain_entry *entry = data;
2315
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02002316 if (per_cpu(in_nmi_frame, smp_processor_id()))
2317 return;
2318
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002319 if (reliable)
2320 callchain_store(entry, addr);
2321}
2322
2323static const struct stacktrace_ops backtrace_ops = {
2324 .warning = backtrace_warning,
2325 .warning_symbol = backtrace_warning_symbol,
2326 .stack = backtrace_stack,
2327 .address = backtrace_address,
2328};
2329
Ingo Molnar038e8362009-06-15 09:57:59 +02002330#include "../dumpstack.h"
2331
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002332static void
2333perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2334{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002335 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02002336 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002337
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002338 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002339}
2340
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002341/*
2342	 * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
2343 */
2344static unsigned long
2345copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002346{
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002347 unsigned long offset, addr = (unsigned long)from;
2348 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2349 unsigned long size, len = 0;
2350 struct page *page;
2351 void *map;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002352 int ret;
2353
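	/*
	 * copy_from_user() may fault and thus cannot run in NMI
	 * context; instead pin each page with the non-faulting
	 * __get_user_pages_fast(), kmap it and copy fragment by
	 * fragment until n bytes are done or a page can't be pinned.
	 */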
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002354 do {
2355 ret = __get_user_pages_fast(addr, 1, 0, &page);
2356 if (!ret)
2357 break;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002358
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002359 offset = addr & (PAGE_SIZE - 1);
2360 size = min(PAGE_SIZE - offset, n - len);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002361
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002362 map = kmap_atomic(page, type);
2363 memcpy(to, map+offset, size);
2364 kunmap_atomic(map, type);
2365 put_page(page);
2366
2367 len += size;
2368 to += size;
2369 addr += size;
2370
2371 } while (len < n);
2372
2373 return len;
2374}
2375
2376static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2377{
2378 unsigned long bytes;
2379
2380 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2381
2382 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002383}
2384
2385static void
2386perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2387{
2388 struct stack_frame frame;
2389 const void __user *fp;
2390
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002391 if (!user_mode(regs))
2392 regs = task_pt_regs(current);
2393
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002394 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002395
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002396 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002397 callchain_store(entry, regs->ip);
2398
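	/*
	 * Walk the user stack via the frame-pointer chain, copying
	 * each frame with the NMI-safe helper above; stop on a failed
	 * copy or when a frame points back below the stack pointer.
	 */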
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002399 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02002400 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002401 frame.return_address = 0;
2402
2403 if (!copy_stack_frame(fp, &frame))
2404 break;
2405
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002406 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002407 break;
2408
2409 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02002410 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002411 }
2412}
2413
2414static void
2415perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2416{
2417 int is_user;
2418
2419 if (!regs)
2420 return;
2421
2422 is_user = user_mode(regs);
2423
2424 if (!current || current->pid == 0)
2425 return;
2426
2427 if (is_user && current->state != TASK_RUNNING)
2428 return;
2429
2430 if (!is_user)
2431 perf_callchain_kernel(regs, entry);
2432
2433 if (current->mm)
2434 perf_callchain_user(regs, entry);
2435}
2436
2437struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2438{
2439 struct perf_callchain_entry *entry;
2440
2441 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09002442 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002443 else
Tejun Heo245b2e72009-06-24 15:13:48 +09002444 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002445
2446 entry->nr = 0;
2447
2448 perf_do_callchain(regs, entry);
2449
2450 return entry;
2451}
Markus Metzger30dd5682009-07-21 15:56:48 +02002452
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002453void hw_perf_event_setup_online(int cpu)
Markus Metzger30dd5682009-07-21 15:56:48 +02002454{
2455 init_debug_store_on_cpu(cpu);
2456}