/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)
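
/*
 * With the values above each per-cpu BTS buffer holds 2048 records of
 * 24 bytes (48 KiB), and the overflow interrupt fires once the buffer
 * is within 128 records (3 KiB) of its end.
 */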

/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
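
/*
 * This layout matches the hardware DS save area: the bts_* fields
 * describe the Branch Trace Store buffer and the pebs_* fields the
 * PEBS buffer; *_index is the hardware write pointer and
 * *_interrupt_threshold the fill level at which a PMI is raised. The
 * struct's address is handed to the CPU via the MSR_IA32_DS_AREA
 * write in init_debug_store_on_cpu() below.
 */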

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64[1];
	};
	int	code;
	int	cmask;
	int	weight;
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};

#define EVENT_CONSTRAINT(c, n, m) {	\
	{ .idxmsk64[0] = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = HWEIGHT64((u64)(n)),	\
}

#define INTEL_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define FIXED_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	for ((e) = (c); (e)->cmask; (e)++)
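
/*
 * Example, for illustration only: INTEL_EVENT_CONSTRAINT(0x12, 0x2)
 * describes event code 0x12 that may only run on generic counter 1:
 * idxmsk64 is the bitmask of allowed counter indices (bit 1 here),
 * weight is the popcount of that mask (1), and cmask selects which
 * bits of a config value are compared against ->code when a
 * constraint is looked up.
 */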

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
				     struct hw_perf_event *hwc, int idx);

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint intel_p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
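
/*
 * How to read the FIXED_EVENT_CONSTRAINT masks above: in idxmsk the
 * low 32 bits cover the generic counters and bits from
 * X86_PMC_IDX_FIXED (32) upwards the fixed counters, so
 * (0x3|(1ULL<<32)) means "generic counter 0 or 1, or fixed counter 0".
 */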

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
	INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}
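
/*
 * For example a raw config of 0x412e (event select 0x2e, unit mask
 * 0x41, the LLC-miss encoding from intel_perfmon_event_map above)
 * passes through CORE_EVNTSEL_MASK unchanged, while bits outside the
 * architectural evtsel/umask/edge/inv/cmask fields are stripped before
 * the value is merged into hwc->config.
 */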

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
		      struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}
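
/*
 * Note: the buffer and debug_store are allocated for every possible
 * CPU, not just the online ones, so the per-cpu ds pointer stays valid
 * for CPUs that come online later; only currently-online CPUs get
 * their DS area MSR pointed at the buffer here.
 */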

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
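
/*
 * The attr->config layout decoded above packs three byte-wide fields:
 * config = type | (op << 8) | (result << 16). For example
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) selects the L1
 * data cache read-miss entry of the per-model table.
 */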

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}
	/*
	 * Raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		cpuc->n_added = 0;

	x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		constraints[i] =
		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
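
/*
 * Illustration of the slow path above: given one event that can only
 * use counter 0 (weight 1) and one that can use any of four generic
 * counters (weight 4), the w loop places the weight-1 event on
 * counter 0 first and then the weight-4 event on the lowest free
 * index, counter 1; assigning in the opposite order could leave the
 * constrained event with nowhere to go.
 */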
1343
1344/*
 1345 * dogrp: true if we must also collect the leader's sibling events (group)
 1346 * returns the total number of events, or a negative error code
1347 */
1348static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1349{
1350 struct perf_event *event;
1351 int n, max_count;
1352
1353 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1354
1355 /* current number of events already accepted */
1356 n = cpuc->n_events;
1357
1358 if (is_x86_event(leader)) {
1359 if (n >= max_count)
1360 return -ENOSPC;
1361 cpuc->event_list[n] = leader;
1362 n++;
1363 }
1364 if (!dogrp)
1365 return n;
1366
1367 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1368 if (!is_x86_event(event) ||
Stephane Eranian81130702010-01-21 17:39:01 +02001369 event->state <= PERF_EVENT_STATE_OFF)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001370 continue;
1371
1372 if (n >= max_count)
1373 return -ENOSPC;
1374
1375 cpuc->event_list[n] = event;
1376 n++;
1377 }
1378 return n;
1379}
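
/*
 * Example (illustrative, derived from the code above): for a group whose
 * leader and two siblings are all active x86 events, on a PMU with
 * num_events = 4 and num_events_fixed = 3 (max_count = 7), collect_events()
 * appends all three to cpuc->event_list and returns cpuc->n_events + 3;
 * it returns -ENOSPC as soon as the list would exceed max_count.
 */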
1380
1381
1382static inline void x86_assign_hw_event(struct perf_event *event,
1383 struct hw_perf_event *hwc, int idx)
1384{
1385 hwc->idx = idx;
1386
1387 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1388 hwc->config_base = 0;
1389 hwc->event_base = 0;
1390 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1391 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1392 /*
1393 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1394 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1395 */
1396 hwc->event_base =
1397 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1398 } else {
1399 hwc->config_base = x86_pmu.eventsel;
1400 hwc->event_base = x86_pmu.perfctr;
1401 }
1402}
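
/*
 * Worked example (derived from the code above): for a fixed counter with
 * idx = X86_PMC_IDX_FIXED + 1, event_base + idx evaluates to
 * (MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED) + (X86_PMC_IDX_FIXED + 1)
 * = MSR_ARCH_PERFMON_FIXED_CTR1, so the rdmsr/wrmsr helpers can keep
 * using event_base + idx for fixed and generic counters alike.
 */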
1403
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001404void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +01001405{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001406 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1407 struct perf_event *event;
1408 struct hw_perf_event *hwc;
1409 int i;
1410
Robert Richter85cf9db2009-04-29 12:47:20 +02001411 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +01001412 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001413 if (cpuc->n_added) {
1414 /*
1415 * apply assignment obtained either from
1416 * hw_perf_group_sched_in() or x86_pmu_enable()
1417 *
1418 * step1: save events moving to new counters
1419 * step2: reprogram moved events into new counters
1420 */
1421 for (i = 0; i < cpuc->n_events; i++) {
1422
1423 event = cpuc->event_list[i];
1424 hwc = &event->hw;
1425
1426 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1427 continue;
1428
1429 x86_pmu.disable(hwc, hwc->idx);
1430
1431 clear_bit(hwc->idx, cpuc->active_mask);
1432 barrier();
1433 cpuc->events[hwc->idx] = NULL;
1434
1435 x86_perf_event_update(event, hwc, hwc->idx);
1436
1437 hwc->idx = -1;
1438 }
1439
1440 for (i = 0; i < cpuc->n_events; i++) {
1441
1442 event = cpuc->event_list[i];
1443 hwc = &event->hw;
1444
1445 if (hwc->idx == -1) {
1446 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1447 x86_perf_event_set_period(event, hwc, hwc->idx);
1448 }
1449 /*
 1450			 * need to mark as active because x86_pmu_disable()
 1451			 * clears active_mask and events[] yet it preserves
 1452			 * idx
1453 */
1454 set_bit(hwc->idx, cpuc->active_mask);
1455 cpuc->events[hwc->idx] = event;
1456
1457 x86_pmu.enable(hwc, hwc->idx);
1458 perf_event_update_userpage(event);
1459 }
1460 cpuc->n_added = 0;
1461 perf_events_lapic_init();
1462 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001463 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +01001464}
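
/*
 * Example (illustrative): suppose event A was on counter 0 and the new
 * assignment moves it to counter 2 while a freshly added event B gets
 * counter 0. Pass 1 disables A on counter 0 and saves its count; pass 2
 * then programs A on counter 2 and B on counter 0. The two passes ensure
 * a moved event is never live on two counters at once.
 */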
Ingo Molnaree060942008-12-13 09:00:03 +01001465
Robert Richter19d84da2009-04-29 12:47:25 +02001466static inline u64 intel_pmu_get_status(void)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001467{
1468 u64 status;
1469
1470 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1471
1472 return status;
1473}
1474
Robert Richterdee5d902009-04-29 12:47:07 +02001475static inline void intel_pmu_ack_status(u64 ack)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001476{
1477 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1478}
1479
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001480static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001481{
Vince Weaver11d15782009-07-08 17:46:14 -04001482 (void)checking_wrmsrl(hwc->config_base + idx,
Robert Richter7c90cc42009-04-29 12:47:18 +02001483 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001484}
1485
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001486static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001487{
Vince Weaver11d15782009-07-08 17:46:14 -04001488 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001489}
1490
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001491static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001492intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001493{
1494 int idx = __idx - X86_PMC_IDX_FIXED;
1495 u64 ctrl_val, mask;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001496
1497 mask = 0xfULL << (idx * 4);
1498
1499 rdmsrl(hwc->config_base, ctrl_val);
1500 ctrl_val &= ~mask;
Vince Weaver11d15782009-07-08 17:46:14 -04001501 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1502}
1503
1504static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001505p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001506{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001507 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1508 u64 val = P6_NOP_EVENT;
Vince Weaver11d15782009-07-08 17:46:14 -04001509
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001510 if (cpuc->enabled)
1511 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
Vince Weaver11d15782009-07-08 17:46:14 -04001512
1513 (void)checking_wrmsrl(hwc->config_base + idx, val);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001514}
1515
1516static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001517intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001518{
Markus Metzger30dd5682009-07-21 15:56:48 +02001519 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1520 intel_pmu_disable_bts();
1521 return;
1522 }
1523
Robert Richterd4369892009-04-29 12:47:19 +02001524 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1525 intel_pmu_disable_fixed(hwc, idx);
1526 return;
1527 }
1528
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001529 x86_pmu_disable_event(hwc, idx);
Robert Richterd4369892009-04-29 12:47:19 +02001530}
1531
1532static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001533amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Robert Richterd4369892009-04-29 12:47:19 +02001534{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001535 x86_pmu_disable_event(hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001536}
1537
Tejun Heo245b2e72009-06-24 15:13:48 +09001538static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001539
Ingo Molnaree060942008-12-13 09:00:03 +01001540/*
1541 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001542 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +01001543 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001544static int
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001545x86_perf_event_set_period(struct perf_event *event,
1546 struct hw_perf_event *hwc, int idx)
Ingo Molnar241771e2008-12-03 10:39:53 +01001547{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001548 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001549 s64 period = hwc->sample_period;
1550 int err, ret = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001551
Markus Metzger30dd5682009-07-21 15:56:48 +02001552 if (idx == X86_PMC_IDX_FIXED_BTS)
1553 return 0;
1554
Ingo Molnaree060942008-12-13 09:00:03 +01001555 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001556 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +01001557 */
1558 if (unlikely(left <= -period)) {
1559 left = period;
1560 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001561 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001562 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001563 }
1564
1565 if (unlikely(left <= 0)) {
1566 left += period;
1567 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001568 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001569 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001570 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001571 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001572	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001573 */
1574 if (unlikely(left < 2))
1575 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +01001576
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001577 if (left > x86_pmu.max_period)
1578 left = x86_pmu.max_period;
1579
Tejun Heo245b2e72009-06-24 15:13:48 +09001580 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +01001581
1582 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001583 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +01001584	 * mark it to be able to extract future deltas:
1585 */
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001586 atomic64_set(&hwc->prev_count, (u64)-left);
Ingo Molnaree060942008-12-13 09:00:03 +01001587
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001588 err = checking_wrmsrl(hwc->event_base + idx,
1589 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001590
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001591 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001592
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001593 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001594}
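
/*
 * Worked example (derived from the code above): with sample_period =
 * period_left = 100000, left stays 100000 and the counter is programmed
 * to (u64)(-100000) & event_mask. Counting upwards from that value, the
 * counter overflows after exactly 100000 increments, which is what
 * raises the next interrupt.
 */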
1595
1596static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001598{
1599 int idx = __idx - X86_PMC_IDX_FIXED;
1600 u64 ctrl_val, bits, mask;
1601 int err;
1602
1603 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001604 * Enable IRQ generation (0x8),
1605 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1606 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001607 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001608 bits = 0x8ULL;
1609 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1610 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001611 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1612 bits |= 0x1;
1613 bits <<= (idx * 4);
1614 mask = 0xfULL << (idx * 4);
1615
1616 rdmsrl(hwc->config_base, ctrl_val);
1617 ctrl_val &= ~mask;
1618 ctrl_val |= bits;
1619 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001620}
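
/*
 * Worked example (derived from the code above): each fixed counter owns a
 * 4-bit nibble in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. For __idx =
 * X86_PMC_IDX_FIXED + 1 (idx = 1) with both USR and OS counting
 * requested, bits = (0x8 | 0x2 | 0x1) << 4 = 0xb0 and mask = 0xf0, so
 * only fixed counter 1's nibble is rewritten.
 */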
1621
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001622static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001623{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001624 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001625 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001626
Peter Zijlstra984b8382009-07-10 09:59:56 +02001627 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001628 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001629 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1630
1631 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001632}
1633
1634
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001635static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001636{
Markus Metzger30dd5682009-07-21 15:56:48 +02001637 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001638 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001639 return;
1640
1641 intel_pmu_enable_bts(hwc->config);
1642 return;
1643 }
1644
Robert Richter7c90cc42009-04-29 12:47:18 +02001645 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1646 intel_pmu_enable_fixed(hwc, idx);
1647 return;
1648 }
1649
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001650 x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001651}
1652
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001653static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001654{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001655 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001656
1657 if (cpuc->enabled)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001658 x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001659}
1660
Ingo Molnaree060942008-12-13 09:00:03 +01001661/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001662 * activate a single event
1663 *
1664 * The event is added to the group of enabled events
 1665 * but only if it can be scheduled with existing events.
1666 *
 1667 * Called with the PMU disabled. On success (return value 0), the
 1668 * caller is then guaranteed to call perf_enable() and hw_perf_enable()
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001669 */
1670static int x86_pmu_enable(struct perf_event *event)
1671{
1672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001673 struct hw_perf_event *hwc;
1674 int assign[X86_PMC_IDX_MAX];
1675 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001676
Stephane Eranian1da53e02010-01-18 10:58:01 +02001677 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001678
Stephane Eranian1da53e02010-01-18 10:58:01 +02001679 n0 = cpuc->n_events;
1680 n = collect_events(cpuc, event, false);
1681 if (n < 0)
1682 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001683
Stephane Eranian1da53e02010-01-18 10:58:01 +02001684 ret = x86_schedule_events(cpuc, n, assign);
1685 if (ret)
1686 return ret;
1687 /*
1688 * copy new assignment, now we know it is possible
1689 * will be used by hw_perf_enable()
1690 */
1691 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001692
Stephane Eranian1da53e02010-01-18 10:58:01 +02001693 cpuc->n_events = n;
1694 cpuc->n_added = n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001695
Stephane Eranian1da53e02010-01-18 10:58:01 +02001696 if (hwc->idx != -1)
1697 x86_perf_event_set_period(event, hwc, hwc->idx);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001698
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001699 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001700}
1701
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001702static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001703{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001704 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1705 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001706
1707 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001708 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001709 return;
1710
1711 x86_pmu.enable(hwc, hwc->idx);
1712}
1713
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001714void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001715{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001716 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001717 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001718 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001719 int cpu, idx;
1720
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001721 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001722 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001723
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001724 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001725
1726 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001727 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001728
Robert Richterfaa28ae2009-04-29 12:47:13 +02001729 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301730 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1731 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1732 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1733 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001734
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301735 pr_info("\n");
1736 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1737 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1738 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1739 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301740 }
Stephane Eranian1da53e02010-01-18 10:58:01 +02001741 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001742
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001743 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001744 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1745 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001746
Tejun Heo245b2e72009-06-24 15:13:48 +09001747 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001748
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301749 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001750 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301751 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001752 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301753 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001754 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001755 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001756 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001757 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1758
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301759 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001760 cpu, idx, pmc_count);
1761 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001762 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001763}
1764
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001765static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001766{
1767 struct debug_store *ds = cpuc->ds;
1768 struct bts_record {
1769 u64 from;
1770 u64 to;
1771 u64 flags;
1772 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001773 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001774 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001775 struct perf_output_handle handle;
1776 struct perf_event_header header;
1777 struct perf_sample_data data;
1778 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001779
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001780 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001781 return;
1782
1783 if (!ds)
1784 return;
1785
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001786 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1787 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001788
Markus Metzger5622f292009-09-15 13:00:23 +02001789 if (top <= at)
1790 return;
1791
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001792 ds->bts_index = ds->bts_buffer_base;
1793
Markus Metzger30dd5682009-07-21 15:56:48 +02001794
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001795 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001796 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001797 data.raw = NULL;
Markus Metzger5622f292009-09-15 13:00:23 +02001798 regs.ip = 0;
1799
1800 /*
1801 * Prepare a generic sample, i.e. fill in the invariant fields.
1802 * We will overwrite the from and to address before we output
1803 * the sample.
1804 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001805 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001806
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001807 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001808 header.size * (top - at), 1, 1))
1809 return;
1810
1811 for (; at < top; at++) {
1812 data.ip = at->from;
1813 data.addr = at->to;
1814
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001815 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001816 }
1817
Markus Metzger5622f292009-09-15 13:00:23 +02001818 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001819
1820 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001821 event->hw.interrupts++;
1822 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001823}
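
/*
 * Example (illustrative): each bts_record holds three u64s (from, to,
 * flags), so (top - at) is the number of buffered branch records. The
 * drain emits one perf sample per record, with data.ip set to the branch
 * source and data.addr to the branch target, and resets ds->bts_index
 * back to the buffer base so hardware tracing starts over.
 */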
1824
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001825static void x86_pmu_disable(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001826{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001827 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1828 struct hw_perf_event *hwc = &event->hw;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001829 int i, idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001830
Robert Richter09534232009-04-29 12:47:16 +02001831 /*
 1832	 * Must be done before we disable, otherwise the NMI handler
 1833	 * could re-enable it:
1834 */
Robert Richter43f62012009-04-29 16:55:56 +02001835 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001836 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001837
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001838 /*
1839 * Make sure the cleared pointer becomes visible before we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001840 * (potentially) free the event:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001841 */
Robert Richter527e26a2009-04-29 12:47:02 +02001842 barrier();
Ingo Molnar241771e2008-12-03 10:39:53 +01001843
Ingo Molnaree060942008-12-13 09:00:03 +01001844 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001845	 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001846 * that we are disabling:
1847 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001848 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001849
1850 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001851 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1852 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001853
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001854 cpuc->events[idx] = NULL;
Peter Zijlstra194002b2009-06-22 16:35:24 +02001855
Stephane Eranian1da53e02010-01-18 10:58:01 +02001856 for (i = 0; i < cpuc->n_events; i++) {
1857 if (event == cpuc->event_list[i]) {
1858
1859 if (x86_pmu.put_event_constraints)
1860 x86_pmu.put_event_constraints(cpuc, event);
1861
1862 while (++i < cpuc->n_events)
1863 cpuc->event_list[i-1] = cpuc->event_list[i];
1864
1865 --cpuc->n_events;
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001866 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001867 }
1868 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001869 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001870}
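
/*
 * Example (illustrative): deleting the middle entry of a three-event
 * list shifts event_list[2] down into slot 1 and decrements n_events,
 * keeping the list dense so the index-based loops above never see holes.
 */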
1871
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001872/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001873 * Save and restart an expired event. Called by NMI contexts,
1874 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001875 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001876static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001877{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001878 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001879 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001880 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001881
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001882 x86_perf_event_update(event, hwc, idx);
1883 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001884
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001885 if (event->state == PERF_EVENT_STATE_ACTIVE)
1886 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001887
1888 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001889}
1890
Ingo Molnaraaba9802009-05-26 08:10:00 +02001891static void intel_pmu_reset(void)
1892{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001893 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001894 unsigned long flags;
1895 int idx;
1896
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001897 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02001898 return;
1899
1900 local_irq_save(flags);
1901
1902 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1903
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001904 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001905 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1906 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1907 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001908 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001909 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1910 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001911 if (ds)
1912 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001913
1914 local_irq_restore(flags);
1915}
1916
Vince Weaver11d15782009-07-08 17:46:14 -04001917static int p6_pmu_handle_irq(struct pt_regs *regs)
1918{
1919 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001920 struct cpu_hw_events *cpuc;
1921 struct perf_event *event;
1922 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001923 int idx, handled = 0;
1924 u64 val;
1925
Vince Weaver11d15782009-07-08 17:46:14 -04001926 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001927 data.raw = NULL;
Vince Weaver11d15782009-07-08 17:46:14 -04001928
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001929 cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001930
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001931 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Vince Weaver11d15782009-07-08 17:46:14 -04001932 if (!test_bit(idx, cpuc->active_mask))
1933 continue;
1934
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001935 event = cpuc->events[idx];
1936 hwc = &event->hw;
Vince Weaver11d15782009-07-08 17:46:14 -04001937
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001938 val = x86_perf_event_update(event, hwc, idx);
1939 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Vince Weaver11d15782009-07-08 17:46:14 -04001940 continue;
1941
1942 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001943 * event overflow
Vince Weaver11d15782009-07-08 17:46:14 -04001944 */
1945 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001946 data.period = event->hw.last_period;
Vince Weaver11d15782009-07-08 17:46:14 -04001947
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001948 if (!x86_perf_event_set_period(event, hwc, idx))
Vince Weaver11d15782009-07-08 17:46:14 -04001949 continue;
1950
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001951 if (perf_event_overflow(event, 1, &data, regs))
1952 p6_pmu_disable_event(hwc, idx);
Vince Weaver11d15782009-07-08 17:46:14 -04001953 }
1954
1955 if (handled)
1956 inc_irq_stat(apic_perf_irqs);
1957
1958 return handled;
1959}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001960
Ingo Molnar241771e2008-12-03 10:39:53 +01001961/*
1962 * This handler is triggered by the local APIC, so the APIC IRQ handling
1963 * rules apply:
1964 */
Yong Wanga3288102009-06-03 13:12:55 +08001965static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001966{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001967 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001968 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001969 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001970 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001971
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001972 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001973 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001974
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001975 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01001976
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001977 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02001978 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02001979 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001980 if (!status) {
1981 perf_enable();
1982 return 0;
1983 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001984
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001985 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001986again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001987 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001988 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1989 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02001990 intel_pmu_reset();
1991 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001992 return 1;
1993 }
1994
Mike Galbraithd278c482009-02-09 07:38:50 +01001995 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001996 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001997 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001998 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01001999
2000 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02002001 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01002002 continue;
2003
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002004 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002005 continue;
2006
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002007 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02002008
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002009 if (perf_event_overflow(event, 1, &data, regs))
2010 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01002011 }
2012
Robert Richterdee5d902009-04-29 12:47:07 +02002013 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01002014
2015 /*
2016 * Repeat if there is more work to be done:
2017 */
Robert Richter19d84da2009-04-29 12:47:25 +02002018 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01002019 if (status)
2020 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002021
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002022 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002023
2024 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01002025}
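
/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * the ack loop above in isolation. read_status() and ack_status() stand
 * in for the rdmsrl/wrmsrl accesses to the global status and overflow
 * control MSRs and are hypothetical names.
 */
#if 0
static int toy_handle_irq(void)
{
	u64 status;
	int loops = 0;

	status = read_status();
	while (status) {
		if (++loops > 100)
			return 1;	/* stuck: reset the PMU and bail */
		/* service every overflowed counter bit set in status */
		ack_status(status);
		status = read_status();
	}
	return 0;
}
#endif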
2026
Yong Wanga3288102009-06-03 13:12:55 +08002027static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02002028{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002029 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002030 struct cpu_hw_events *cpuc;
2031 struct perf_event *event;
2032 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04002033 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002034 u64 val;
2035
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002036 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002037 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002038
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002039 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002040
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002041 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02002042 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02002043 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002044
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002045 event = cpuc->events[idx];
2046 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002047
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002048 val = x86_perf_event_update(event, hwc, idx);
2049 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002050 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002051
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002052 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002053 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002054 */
2055 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002056 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002057
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002058 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002059 continue;
2060
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 if (perf_event_overflow(event, 1, &data, regs))
2062 amd_pmu_disable_event(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002063 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002064
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002065 if (handled)
2066 inc_irq_stat(apic_perf_irqs);
2067
Robert Richtera29aa8a2009-04-29 12:47:21 +02002068 return handled;
2069}
Robert Richter39d81ea2009-04-29 12:47:05 +02002070
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002071void smp_perf_pending_interrupt(struct pt_regs *regs)
2072{
2073 irq_enter();
2074 ack_APIC_irq();
2075 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002076 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002077 irq_exit();
2078}
2079
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002080void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002081{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002082#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02002083 if (!x86_pmu.apic || !x86_pmu_initialized())
2084 return;
2085
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002086 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002087#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002088}
2089
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002090void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002091{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002092#ifdef CONFIG_X86_LOCAL_APIC
2093 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01002094 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02002095
Ingo Molnar241771e2008-12-03 10:39:53 +01002096 /*
Yong Wangc323d952009-05-29 13:28:35 +08002097 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01002098 */
Yong Wangc323d952009-05-29 13:28:35 +08002099 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002100#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01002101}
2102
2103static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002104perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01002105 unsigned long cmd, void *__args)
2106{
2107 struct die_args *args = __args;
2108 struct pt_regs *regs;
2109
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002110 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02002111 return NOTIFY_DONE;
2112
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002113 switch (cmd) {
2114 case DIE_NMI:
2115 case DIE_NMI_IPI:
2116 break;
2117
2118 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01002119 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002120 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002121
2122 regs = args->regs;
2123
Ingo Molnar04da8a42009-08-11 10:40:08 +02002124#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01002125 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002126#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002127 /*
2128 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002129 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002130 *
2131 * If the first NMI handles both, the latter will be empty and daze
2132 * the CPU.
2133 */
Yong Wanga3288102009-06-03 13:12:55 +08002134 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002135
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002136 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01002137}
2138
Peter Zijlstra63b14642010-01-22 16:32:17 +01002139static struct event_constraint unconstrained;
2140
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002141static struct event_constraint bts_constraint =
2142 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002143
Peter Zijlstra63b14642010-01-22 16:32:17 +01002144static struct event_constraint *
2145intel_special_constraints(struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002146{
2147 unsigned int hw_event;
2148
2149 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2150
2151 if (unlikely((hw_event ==
2152 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2153 (event->hw.sample_period == 1))) {
2154
Peter Zijlstra63b14642010-01-22 16:32:17 +01002155 return &bts_constraint;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002156 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002157 return NULL;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002158}
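
/*
 * Example (derived from the code above): a BRANCH_INSTRUCTIONS event with
 * sample_period == 1 is precisely the configuration that branch tracing
 * can service, so it is pinned to the pseudo counter
 * X86_PMC_IDX_FIXED_BTS; every other event falls through to the regular
 * constraint tables.
 */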
2159
Peter Zijlstra63b14642010-01-22 16:32:17 +01002160static struct event_constraint *
2161intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002162{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002163 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002164
Peter Zijlstra63b14642010-01-22 16:32:17 +01002165 c = intel_special_constraints(event);
2166 if (c)
2167 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002168
2169 if (x86_pmu.event_constraints) {
2170 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01002171 if ((event->hw.config & c->cmask) == c->code)
2172 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002173 }
2174 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002175
2176 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002177}
2178
Peter Zijlstra63b14642010-01-22 16:32:17 +01002179static struct event_constraint *
2180amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002181{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002182 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002183}
2184
2185static int x86_event_sched_in(struct perf_event *event,
2186 struct perf_cpu_context *cpuctx, int cpu)
2187{
2188 int ret = 0;
2189
2190 event->state = PERF_EVENT_STATE_ACTIVE;
2191 event->oncpu = cpu;
2192 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2193
2194 if (!is_x86_event(event))
2195 ret = event->pmu->enable(event);
2196
2197 if (!ret && !is_software_event(event))
2198 cpuctx->active_oncpu++;
2199
2200 if (!ret && event->attr.exclusive)
2201 cpuctx->exclusive = 1;
2202
2203 return ret;
2204}
2205
2206static void x86_event_sched_out(struct perf_event *event,
2207 struct perf_cpu_context *cpuctx, int cpu)
2208{
2209 event->state = PERF_EVENT_STATE_INACTIVE;
2210 event->oncpu = -1;
2211
2212 if (!is_x86_event(event))
2213 event->pmu->disable(event);
2214
2215 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2216
2217 if (!is_software_event(event))
2218 cpuctx->active_oncpu--;
2219
2220 if (event->attr.exclusive || !cpuctx->active_oncpu)
2221 cpuctx->exclusive = 0;
2222}
2223
2224/*
2225 * Called to enable a whole group of events.
 2226 * Returns 1 if the group was enabled, or a negative error code if not.
 2227 * Assumes the caller has disabled interrupts and has
 2228 * frozen the PMU with perf_disable().
2229 *
 2230 * Called with the PMU disabled. If successful (return value 1), the
 2231 * caller is then guaranteed to call perf_enable() and hw_perf_enable()
2232 */
2233int hw_perf_group_sched_in(struct perf_event *leader,
2234 struct perf_cpu_context *cpuctx,
2235 struct perf_event_context *ctx, int cpu)
2236{
2237 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2238 struct perf_event *sub;
2239 int assign[X86_PMC_IDX_MAX];
2240 int n0, n1, ret;
2241
2242 /* n0 = total number of events */
2243 n0 = collect_events(cpuc, leader, true);
2244 if (n0 < 0)
2245 return n0;
2246
2247 ret = x86_schedule_events(cpuc, n0, assign);
2248 if (ret)
2249 return ret;
2250
2251 ret = x86_event_sched_in(leader, cpuctx, cpu);
2252 if (ret)
2253 return ret;
2254
2255 n1 = 1;
2256 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02002257 if (sub->state > PERF_EVENT_STATE_OFF) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02002258 ret = x86_event_sched_in(sub, cpuctx, cpu);
2259 if (ret)
2260 goto undo;
2261 ++n1;
2262 }
2263 }
2264 /*
2265 * copy new assignment, now we know it is possible
2266 * will be used by hw_perf_enable()
2267 */
2268 memcpy(cpuc->assign, assign, n0*sizeof(int));
2269
2270 cpuc->n_events = n0;
2271 cpuc->n_added = n1;
2272 ctx->nr_active += n1;
2273
2274 /*
 2275	 * 1 means successful and events are active.
 2276	 * This is not quite true because we defer
 2277	 * actual activation until hw_perf_enable(), but
 2278	 * this way we ensure the caller won't try to enable
 2279	 * individual events
2280 */
2281 return 1;
2282undo:
2283 x86_event_sched_out(leader, cpuctx, cpu);
2284 n0 = 1;
2285 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2286 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2287 x86_event_sched_out(sub, cpuctx, cpu);
2288 if (++n0 == n1)
2289 break;
2290 }
2291 }
2292 return ret;
2293}
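
/*
 * Example (illustrative): for a leader with two siblings, a failure while
 * scheduling the second sibling takes the undo: path, which sched_out()s
 * the leader and each already-activated sibling (n1 of them in total),
 * leaving the whole group inactive as if the call had never happened.
 */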
2294
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002295static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2296 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01002297 .next = NULL,
2298 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01002299};
2300
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002301static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04002302 .name = "p6",
2303 .handle_irq = p6_pmu_handle_irq,
2304 .disable_all = p6_pmu_disable_all,
2305 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002306 .enable = p6_pmu_enable_event,
2307 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04002308 .eventsel = MSR_P6_EVNTSEL0,
2309 .perfctr = MSR_P6_PERFCTR0,
2310 .event_map = p6_pmu_event_map,
2311 .raw_event = p6_pmu_raw_event,
2312 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002313 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04002314 .max_period = (1ULL << 31) - 1,
2315 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002316 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04002317 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002318 * Events have 40 bits implemented. However they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04002319 * that bits [32-39] are sign extensions of bit 31. As such the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002320	 * effective width of an event for a P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04002321	 *
 2322	 * See the IA-32 Intel Architecture Software Developer's Manual Vol 3B
2323 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002324 .event_bits = 32,
2325 .event_mask = (1ULL << 32) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002326 .get_event_constraints = intel_get_event_constraints,
2327 .event_constraints = intel_p6_event_constraints
Vince Weaver11d15782009-07-08 17:46:14 -04002328};
2329
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002330static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002331 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02002332 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002333 .disable_all = intel_pmu_disable_all,
2334 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002335 .enable = intel_pmu_enable_event,
2336 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302337 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2338 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002339 .event_map = intel_pmu_event_map,
2340 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302341 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002342 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002343 /*
2344 * Intel PMCs cannot be accessed sanely above 32 bit width,
2345 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002346 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002347 */
2348 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002349 .enable_bts = intel_pmu_enable_bts,
2350 .disable_bts = intel_pmu_disable_bts,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002351 .get_event_constraints = intel_get_event_constraints
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302352};
2353
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002354static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002355 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02002356 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002357 .disable_all = amd_pmu_disable_all,
2358 .enable_all = amd_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002359 .enable = amd_pmu_enable_event,
2360 .disable = amd_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302361 .eventsel = MSR_K7_EVNTSEL0,
2362 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002363 .event_map = amd_pmu_event_map,
2364 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302365 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002366 .num_events = 4,
2367 .event_bits = 48,
2368 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002369 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002370 /* use highest bit to detect overflow */
2371 .max_period = (1ULL << 47) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002372 .get_event_constraints = amd_get_event_constraints
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302373};
2374
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002375static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002376{
Vince Weaver11d15782009-07-08 17:46:14 -04002377 switch (boot_cpu_data.x86_model) {
2378 case 1:
2379 case 3: /* Pentium Pro */
2380 case 5:
2381 case 6: /* Pentium II */
2382 case 7:
2383 case 8:
2384 case 11: /* Pentium III */
Vince Weaver11d15782009-07-08 17:46:14 -04002385 case 9:
2386 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002387 /* Pentium M */
2388 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002389 default:
2390 pr_cont("unsupported p6 CPU model %d ",
2391 boot_cpu_data.x86_model);
2392 return -ENODEV;
2393 }
2394
Ingo Molnar04da8a42009-08-11 10:40:08 +02002395 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002396
Vince Weaver11d15782009-07-08 17:46:14 -04002397 return 0;
2398}
2399
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002400static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002401{
Ingo Molnar703e9372008-12-17 10:51:15 +01002402 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002403 union cpuid10_eax eax;
2404 unsigned int unused;
2405 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002406 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002407
Vince Weaver11d15782009-07-08 17:46:14 -04002408 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2409 /* check for P6 processor family */
2410 if (boot_cpu_data.x86 == 6) {
2411 return p6_pmu_init();
2412 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002413 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002414 }
2415 }
Robert Richterda1a7762009-04-29 12:46:58 +02002416
Ingo Molnar241771e2008-12-03 10:39:53 +01002417 /*
2418 * Check whether the Architectural PerfMon supports
Ingo Molnardfc65092009-09-21 11:31:35 +02002419 * Branch Misses Retired hw_event or not.
Ingo Molnar241771e2008-12-03 10:39:53 +01002420 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002421 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002422 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002423 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002424
Robert Richterfaa28ae2009-04-29 12:47:13 +02002425 version = eax.split.version_id;
2426 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02002427 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002428
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002429 x86_pmu = intel_pmu;
2430 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002431 x86_pmu.num_events = eax.split.num_events;
2432 x86_pmu.event_bits = eax.split.bit_width;
2433 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002434
2435 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002436 * Quirk: v2 perfmon does not report fixed-purpose events, so
2437 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002438 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002439 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302440
Ingo Molnar8326f442009-06-05 20:22:46 +02002441 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002442 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002443 */
2444 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08002445 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2446 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2447 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2448 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002449 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002450 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002451
Stephane Eranian1da53e02010-01-18 10:58:01 +02002452 x86_pmu.event_constraints = intel_core_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002453 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002454 break;
Ingo Molnar8326f442009-06-05 20:22:46 +02002455 case 26:
2456 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002457 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002458
Stephane Eranian1da53e02010-01-18 10:58:01 +02002459 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002460 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002461 break;
2462 case 28:
2463 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002464 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002465
Stephane Eranian1da53e02010-01-18 10:58:01 +02002466 x86_pmu.event_constraints = intel_gen_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002467 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002468 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002469 default:
2470 /*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

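	/*
	 * Fixed-purpose counters sit above the generic ones in the
	 * global control mask: their enable bits start at
	 * X86_PMC_IDX_FIXED, so shift their mask up accordingly.
	 */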
	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

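	/*
	 * Events without a specific scheduling constraint may go on
	 * any generic counter: build a catch-all constraint whose
	 * index mask covers all of them.
	 */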
	unconstrained = (struct event_constraint)
		EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:             %016Lx\n", perf_event_mask);
}

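/*
 * Fold the current hardware counter value into event->count.
 */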
static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kzalloc(sizeof(*fake_cpuc), GFP_KERNEL);
	if (!fake_cpuc)
		goto out;

	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

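	/*
	 * Dry-run the scheduler on the fake cpuc: passing a NULL
	 * assignment array means we only ask whether the group is
	 * schedulable, without committing counter assignments.
	 */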
	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect the event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

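/*
 * Separate per-cpu buffers for IRQ and NMI context: an NMI can fire
 * while an interrupt-context callchain is still being assembled, so
 * the two contexts must not share an entry.
 */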
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

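	/*
	 * We may not take page faults here, so pin each user page
	 * with the fast GUP path and copy it through a temporary
	 * atomic kmapping, one page at a time.
	 */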
	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

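	/*
	 * Walk the user stack by chasing saved frame pointers: each
	 * frame holds the caller's return address and a pointer to
	 * the next (parent) frame.
	 */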
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame	= NULL;
		frame.return_address	= 0;

		if (!copy_stack_frame(fp, &frame))
			break;

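		/*
		 * The user stack grows down, so a valid parent frame
		 * must live at a higher address than the current
		 * stack pointer; anything below it means the chain
		 * is bogus.
		 */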
		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

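	/*
	 * Only walk the user stack of a task that is currently
	 * running; otherwise its registers may not correspond to a
	 * live user context.
	 */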
	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

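/*
 * (Re)initialize the per-cpu debug store area when a CPU comes
 * online, so that BTS can be used on it.
 */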
void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}