/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)

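/*
 * With the sizes above, each per-cpu BTS buffer holds 2048 records
 * (24 * 2048 = 49152 bytes), and the interrupt threshold is reached
 * 128 records (3072 bytes) before the end of the buffer.
 */
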
/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

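/*
 * p6_pmu_disable_event() below writes P6_NOP_EVENT into the event
 * select MSR: with all MESI unit-mask bits clear, L2_RQSTS never
 * counts, which stops the counter without touching global state.
 */
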
static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}


/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per-model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_REG_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the new raw count.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

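/*
 * Note the lifecycle above: a debug store is allocated for every
 * possible CPU, but only online CPUs have it registered with the
 * hardware via MSR_IA32_DS_AREA (init_debug_store_on_cpu). On any
 * allocation failure, the partial state is torn down again through
 * release_bts_hardware().
 */
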
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

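/*
 * For example, a PERF_TYPE_HW_CACHE config asking for L1D read
 * misses is encoded as:
 *
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which the table lookup above translates to the model-specific
 * raw event id (0x0140, L1D_CACHE_LD.I_STATE, on Nehalem).
 */
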
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event types provide the config directly in the
	 * attr structure.
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

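/*
 * The overflow handshake: the IRQ handler reads the per-counter
 * overflow bits from MSR_CORE_PERF_GLOBAL_STATUS via
 * intel_pmu_get_status() and, after servicing the overflowed
 * counters, writes the same bits back to
 * MSR_CORE_PERF_GLOBAL_OVF_CTRL to clear them.
 */
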
static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}

static inline void
amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	x86_pmu_disable_event(hwc, idx);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			     (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}

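/*
 * Example: with a sample_period of 100000 and 40-bit counters, the
 * counter is programmed to (-100000 & event_mask), i.e. 2^40 - 100000,
 * so it overflows and raises a PMI after exactly 100000 events.
 */
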
static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

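/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL. For example, enabling fixed
 * counter 1 for both user and kernel with PMI generation sets
 * bits = (0x8 | 0x2 | 0x1) << 4 = 0xb0 in that MSR.
 */
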
static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}


static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_event(hwc, idx);
}

static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		x86_pmu_enable_event(hwc, idx);
}

static int
fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
{
	unsigned int hw_event;

	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (hwc->sample_period == 1)))
		return X86_PMC_IDX_FIXED_BTS;

	if (!x86_pmu.num_events_fixed)
		return -1;

	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled-in event:
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

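	/*
	 * Slot allocation, in order of preference: the BTS pseudo-counter
	 * when fixed_mode_idx() selects it, then the matching fixed
	 * counter (falling back to the generic pool when taken), then the
	 * generic counter this event used last time, and finally the
	 * first free generic counter in used_mask.
	 */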
	idx = fixed_mode_idx(event, hwc);
	if (idx == X86_PMC_IDX_FIXED_BTS) {
		/* BTS is already occupied. */
		if (test_and_set_bit(idx, cpuc->used_mask))
			return -EAGAIN;

		hwc->config_base = 0;
		hwc->event_base = 0;
		hwc->idx = idx;
	} else if (idx >= 0) {
		/*
		 * Try to get the fixed event; if that is already taken,
		 * then try to get a generic event:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic event again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_events);
			if (idx == x86_pmu.num_events)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base = x86_pmu.perfctr;
	}

	perf_events_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	x86_perf_event_set_period(event, hwc, idx);
	x86_pmu.enable(hwc, idx);

	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->events[hwc->idx] != event))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:   %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:    %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:     %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

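/*
 * Flush the per-cpu BTS buffer into the perf ring buffer.  The branch
 * trace hardware appends one from/to/flags triple per recorded branch
 * to the debug-store area; each record is turned into a perf sample
 * with ip = branch source and addr = branch target, and the buffer
 * index is rewound to the start.
 */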
static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	data.period = event->hw.last_period;
	data.addr = 0;
	regs.ip = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the NMI handler
	 * could re-enable it again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the event:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event, hwc, idx);

	/* Drain the remaining BTS records. */
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
		intel_pmu_drain_bts_buffer(cpuc);

	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	if (event->state == PERF_EVENT_STATE_ACTIVE)
		intel_pmu_enable_event(hwc, idx);

	return ret;
}

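/*
 * Last-resort PMU cleanup, used when the overflow handler appears to
 * be stuck in a loop: zero every generic and fixed counter's control
 * and count MSRs and rewind the BTS buffer, with interrupts disabled.
 */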
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

static int p6_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

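		/*
		 * Counters are programmed with a negative initial value
		 * and count up towards overflow, so while the sign bit
		 * (bit event_bits - 1) of the updated count is still set
		 * this counter has not wrapped and needs no service.
		 */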
		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			p6_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer(cpuc);
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

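	/*
	 * Process overflows in a loop: fresh overflows can be raised in
	 * GLOBAL_STATUS while the current batch is being handled.  Give
	 * up after 100 rounds and hard-reset the PMU rather than spin
	 * forever in NMI context.
	 */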
	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *)&status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			amd_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

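/*
 * Deferred-wakeup path: set_perf_event_pending() below sends a self-IPI
 * on LOCAL_PENDING_VECTOR, so that the pending perf work is run from
 * this ordinary interrupt handler instead of from NMI context.
 */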
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI;
	 * two events could trigger 'simultaneously', raising two
	 * back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and
	 * daze the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

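/*
 * Per-vendor PMU descriptions.  Each struct x86_pmu bundles the
 * enable/disable/handle_irq callbacks, the eventsel/perfctr MSR bases
 * and the counter geometry for one PMU flavour; the matching init
 * routine copies one of these into the global x86_pmu at boot.
 */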
static struct x86_pmu p6_pmu = {
	.name		= "p6",
	.handle_irq	= p6_pmu_handle_irq,
	.disable_all	= p6_pmu_disable_all,
	.enable_all	= p6_pmu_enable_all,
	.enable		= p6_pmu_enable_event,
	.disable	= p6_pmu_disable_event,
	.eventsel	= MSR_P6_EVNTSEL0,
	.perfctr	= MSR_P6_PERFCTR0,
	.event_map	= p6_pmu_event_map,
	.raw_event	= p6_pmu_raw_event,
	.max_events	= ARRAY_SIZE(p6_perfmon_event_map),
	.apic		= 1,
	.max_period	= (1ULL << 31) - 1,
	.version	= 0,
	.num_events	= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of an event for P6-like PMUs is 32 bits only.
	 *
	 * See the IA-32 Intel Architecture Software Developer's Manual,
	 * Vol 3B.
	 */
	.event_bits	= 32,
	.event_mask	= (1ULL << 32) - 1,
};

static struct x86_pmu intel_pmu = {
	.name		= "Intel",
	.handle_irq	= intel_pmu_handle_irq,
	.disable_all	= intel_pmu_disable_all,
	.enable_all	= intel_pmu_enable_all,
	.enable		= intel_pmu_enable_event,
	.disable	= intel_pmu_disable_event,
	.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map	= intel_pmu_event_map,
	.raw_event	= intel_pmu_raw_event,
	.max_events	= ARRAY_SIZE(intel_perfmon_event_map),
	.apic		= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period	= (1ULL << 31) - 1,
	.enable_bts	= intel_pmu_enable_bts,
	.disable_bts	= intel_pmu_disable_bts,
};

static struct x86_pmu amd_pmu = {
	.name		= "AMD",
	.handle_irq	= amd_pmu_handle_irq,
	.disable_all	= amd_pmu_disable_all,
	.enable_all	= amd_pmu_enable_all,
	.enable		= amd_pmu_enable_event,
	.disable	= amd_pmu_disable_event,
	.eventsel	= MSR_K7_EVNTSEL0,
	.perfctr	= MSR_K7_PERFCTR0,
	.event_map	= amd_pmu_event_map,
	.raw_event	= amd_pmu_raw_event,
	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
	.num_events	= 4,
	.event_bits	= 48,
	.event_mask	= (1ULL << 48) - 1,
	.apic		= 1,
	/* use highest bit to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

static int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
		break;
	case 9:
	case 13:
		/* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	if (!cpu_has_apic) {
		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
		pr_info("no hardware sampling interrupt available.\n");
		x86_pmu.apic = 0;
	}

	return 0;
}

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_events = eax.split.num_events;
	x86_pmu.event_bits = eax.split.bit_width;
	x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Core2 events, ");
		break;
	default:
	case 26:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Nehalem/Corei7 events, ");
		break;
	case 28:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Atom events, ");
		break;
	}
	return 0;
}

static int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pr_cont("%s PMU driver.\n", x86_pmu.name);

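	/*
	 * perf_event_mask ends up with one bit per usable counter:
	 * generic counters in the low bits, fixed-purpose counters
	 * from bit X86_PMC_IDX_FIXED upwards.  The same mask doubles
	 * as the value programmed into the global control MSR via
	 * x86_pmu.intel_ctrl.
	 */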
	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	pr_info("... version:              %d\n",     x86_pmu.version);
	pr_info("... bit width:            %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:    %d\n",     x86_pmu.num_events);
	pr_info("... value mask:           %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:           %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events: %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:           %016Lx\n", perf_event_mask);
}

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

	err = __hw_perf_event_init(event);
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
static DEFINE_PER_CPU(int, in_nmi_frame);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	per_cpu(in_nmi_frame, smp_processor_id()) =
			x86_is_stack_id(NMI_STACK, name);

	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (per_cpu(in_nmi_frame, smp_processor_id()))
		return;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}

/*
 * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context.
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

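/*
 * Walk the user stack via frame pointers: each struct stack_frame
 * read from fp holds the caller's frame pointer and return address.
 * The walk stops when a frame cannot be copied or when fp drops
 * below regs->sp, since legitimate frames only grow upwards.
 */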
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

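/*
 * Two per-cpu entry buffers are kept, one for NMI and one for IRQ
 * context, so that an NMI arriving in the middle of an IRQ-context
 * unwind does not overwrite the half-built callchain.
 */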
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}