/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)


/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
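
/*
 * For orientation: reserve_bts_hardware() below points bts_buffer_base
 * at a BTS_BUFFER_SIZE buffer; the CPU appends one BTS_RECORD_SIZE
 * (24 byte) record per taken branch at bts_index, and raises an
 * interrupt once bts_index passes bts_interrupt_threshold, which is
 * left BTS_OVFL_TH (128 records) short of the end of the buffer.
 */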

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64[1];
	};
	int	code;
	int	cmask;
	int	weight;
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};

#define EVENT_CONSTRAINT(c, n, m) {	\
	{ .idxmsk64[0] = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = HWEIGHT64((u64)(n)),	\
}

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
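
/*
 * As an example, INTEL_EVENT_CONSTRAINT(0x12, 0x2) (MUL below) expands
 * to a constraint that lets event code 0x12 run only on counter 1:
 * idxmsk64[0] holds the counter bitmask 0x2, cmask selects the evtsel
 * bits that identify the event, and weight = HWEIGHT64(0x2) = 1 is the
 * number of usable counters, consumed by x86_schedule_events().
 */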

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint intel_p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
	INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}
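
/*
 * Note the wider event field compared to Intel above:
 * K7_EVNTSEL_EVENT_MASK keeps bits 0-7 plus bits 32-34 (the extended
 * event-select bits on later AMD CPUs), so raw event codes above 0xff
 * remain expressible.
 */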

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the new raw count.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
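
/*
 * Worked example of the shift trick above: with 48 event_bits
 * (shift == 16), a counter that rolled over from 0xffffffffffff to
 * 0x100 gives delta = (0x100 << 16) - (0xffffffffffff << 16); taken
 * as a signed 64-bit value and shifted back down that is +0x101 (257)
 * events, rather than the huge bogus value a plain 64-bit subtraction
 * of the unextended counts would produce.
 */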

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
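
/*
 * Example decode for the above: attr->config == 0x10002 means
 * cache_type 0x02 (LL), cache_op 0x00 (OP_READ) and cache_result 0x01
 * (RESULT_MISS), i.e. last-level-cache read misses, which is then
 * looked up in the per-model hw_cache_event_ids table.
 */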

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event types provide the config directly in the
	 * hw_event structure:
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		cpuc->n_added = 0;

	x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		constraints[i] =
		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
1348
1349/*
1350 * dogrp: true if we must collect sibling events (group)
1351 * returns the total number of events, or an error code
1352 */
1353static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1354{
1355 struct perf_event *event;
1356 int n, max_count;
1357
1358 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1359
1360 /* current number of events already accepted */
1361 n = cpuc->n_events;
1362
1363 if (is_x86_event(leader)) {
1364 if (n >= max_count)
1365 return -ENOSPC;
1366 cpuc->event_list[n] = leader;
1367 n++;
1368 }
1369 if (!dogrp)
1370 return n;
1371
1372 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1373 if (!is_x86_event(event) ||
Stephane Eranian81130702010-01-21 17:39:01 +02001374 event->state <= PERF_EVENT_STATE_OFF)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001375 continue;
1376
1377 if (n >= max_count)
1378 return -ENOSPC;
1379
1380 cpuc->event_list[n] = event;
1381 n++;
1382 }
1383 return n;
1384}
1385
1386
1387static inline void x86_assign_hw_event(struct perf_event *event,
1388 struct hw_perf_event *hwc, int idx)
1389{
1390 hwc->idx = idx;
1391
1392 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1393 hwc->config_base = 0;
1394 hwc->event_base = 0;
1395 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1396 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1397 /*
1398 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1399 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1400 */
1401 hwc->event_base =
1402 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
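		/*
		 * Worked example (illustrative; assumes the usual value
		 * X86_PMC_IDX_FIXED == 32): fixed counter 1 has idx == 33,
		 * so event_base + idx in wrmsr/rdmsr resolves to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 + 1, i.e. fixed counter 1.
		 */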
1403 } else {
1404 hwc->config_base = x86_pmu.eventsel;
1405 hwc->event_base = x86_pmu.perfctr;
1406 }
1407}
1408
Peter Zijlstra2e841872010-01-25 15:58:43 +01001409static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
1410
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001411void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +01001412{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001413 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1414 struct perf_event *event;
1415 struct hw_perf_event *hwc;
1416 int i;
1417
Robert Richter85cf9db2009-04-29 12:47:20 +02001418 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +01001419 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001420 if (cpuc->n_added) {
1421 /*
1422 * apply assignment obtained either from
1423 * hw_perf_group_sched_in() or x86_pmu_enable()
1424 *
1425 * step1: save events moving to new counters
1426 * step2: reprogram moved events into new counters
1427 */
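		/*
		 * For example (an illustrative case, not an exhaustive one):
		 * an event that was counting on PMC0 but has been reassigned
		 * to PMC2 is disabled and drained by the first loop below,
		 * then reprogrammed and re-enabled on PMC2 by the second.
		 */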
1428 for (i = 0; i < cpuc->n_events; i++) {
1429
1430 event = cpuc->event_list[i];
1431 hwc = &event->hw;
1432
1433 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1434 continue;
1435
Peter Zijlstra2e841872010-01-25 15:58:43 +01001436 __x86_pmu_disable(event, cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001437
1438 hwc->idx = -1;
1439 }
1440
1441 for (i = 0; i < cpuc->n_events; i++) {
1442
1443 event = cpuc->event_list[i];
1444 hwc = &event->hw;
1445
1446 if (hwc->idx == -1) {
1447 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1448 x86_perf_event_set_period(event, hwc, hwc->idx);
1449 }
1450 /*
1451 * need to mark as active because x86_pmu_disable()
1452 * clears active_mask and events[] yet it preserves
1453 * idx
1454 */
1455 set_bit(hwc->idx, cpuc->active_mask);
1456 cpuc->events[hwc->idx] = event;
1457
1458 x86_pmu.enable(hwc, hwc->idx);
1459 perf_event_update_userpage(event);
1460 }
1461 cpuc->n_added = 0;
1462 perf_events_lapic_init();
1463 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001464 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +01001465}
Ingo Molnaree060942008-12-13 09:00:03 +01001466
Robert Richter19d84da2009-04-29 12:47:25 +02001467static inline u64 intel_pmu_get_status(void)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001468{
1469 u64 status;
1470
1471 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1472
1473 return status;
1474}
1475
Robert Richterdee5d902009-04-29 12:47:07 +02001476static inline void intel_pmu_ack_status(u64 ack)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001477{
1478 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1479}
1480
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001481static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001482{
Vince Weaver11d15782009-07-08 17:46:14 -04001483 (void)checking_wrmsrl(hwc->config_base + idx,
Robert Richter7c90cc42009-04-29 12:47:18 +02001484 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001485}
1486
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001487static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001488{
Vince Weaver11d15782009-07-08 17:46:14 -04001489 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001490}
1491
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001492static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001493intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001494{
1495 int idx = __idx - X86_PMC_IDX_FIXED;
1496 u64 ctrl_val, mask;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001497
1498 mask = 0xfULL << (idx * 4);
1499
1500 rdmsrl(hwc->config_base, ctrl_val);
1501 ctrl_val &= ~mask;
Vince Weaver11d15782009-07-08 17:46:14 -04001502 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1503}
1504
1505static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001506p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001507{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001508 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1509 u64 val = P6_NOP_EVENT;
Vince Weaver11d15782009-07-08 17:46:14 -04001510
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001511 if (cpuc->enabled)
1512 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
Vince Weaver11d15782009-07-08 17:46:14 -04001513
1514 (void)checking_wrmsrl(hwc->config_base + idx, val);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001515}
1516
1517static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001518intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001519{
Markus Metzger30dd5682009-07-21 15:56:48 +02001520 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1521 intel_pmu_disable_bts();
1522 return;
1523 }
1524
Robert Richterd4369892009-04-29 12:47:19 +02001525 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1526 intel_pmu_disable_fixed(hwc, idx);
1527 return;
1528 }
1529
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001530 x86_pmu_disable_event(hwc, idx);
Robert Richterd4369892009-04-29 12:47:19 +02001531}
1532
1533static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001534amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Robert Richterd4369892009-04-29 12:47:19 +02001535{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001536 x86_pmu_disable_event(hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001537}
1538
Tejun Heo245b2e72009-06-24 15:13:48 +09001539static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001540
Ingo Molnaree060942008-12-13 09:00:03 +01001541/*
1542 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001543 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +01001544 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001545static int
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001546x86_perf_event_set_period(struct perf_event *event,
1547 struct hw_perf_event *hwc, int idx)
Ingo Molnar241771e2008-12-03 10:39:53 +01001548{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001549 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001550 s64 period = hwc->sample_period;
1551 int err, ret = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001552
Markus Metzger30dd5682009-07-21 15:56:48 +02001553 if (idx == X86_PMC_IDX_FIXED_BTS)
1554 return 0;
1555
Ingo Molnaree060942008-12-13 09:00:03 +01001556 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001557 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +01001558 */
1559 if (unlikely(left <= -period)) {
1560 left = period;
1561 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001562 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001563 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001564 }
1565
1566 if (unlikely(left <= 0)) {
1567 left += period;
1568 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001569 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001570 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001571 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001572 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001573	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001574 */
1575 if (unlikely(left < 2))
1576 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +01001577
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001578 if (left > x86_pmu.max_period)
1579 left = x86_pmu.max_period;
1580
Tejun Heo245b2e72009-06-24 15:13:48 +09001581 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +01001582
1583 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001584 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +01001585	 * mark it to be able to extract future deltas:
1586 */
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001587 atomic64_set(&hwc->prev_count, (u64)-left);
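	/*
	 * Illustrative example: with left == 1000 the counter is loaded
	 * with -1000 (truncated to x86_pmu.event_bits by the write below),
	 * so it overflows and raises the interrupt after exactly 1000
	 * increments.
	 */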
Ingo Molnaree060942008-12-13 09:00:03 +01001588
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001589 err = checking_wrmsrl(hwc->event_base + idx,
1590 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001591
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001592 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001593
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001594 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001595}
1596
1597static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001598intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001599{
1600 int idx = __idx - X86_PMC_IDX_FIXED;
1601 u64 ctrl_val, bits, mask;
1602 int err;
1603
1604 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001605 * Enable IRQ generation (0x8),
1606 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1607 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001608 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001609 bits = 0x8ULL;
1610 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1611 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001612 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1613 bits |= 0x1;
1614 bits <<= (idx * 4);
1615 mask = 0xfULL << (idx * 4);
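	/*
	 * Worked example (for illustration): for idx == 1 with both USR
	 * and OS counting requested, bits == 0xb before the shift above,
	 * 0xb0 after it, and mask == 0xf0 selects that counter's control
	 * nibble in the fixed-counter control MSR.
	 */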
1616
1617 rdmsrl(hwc->config_base, ctrl_val);
1618 ctrl_val &= ~mask;
1619 ctrl_val |= bits;
1620 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001621}
1622
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001623static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001624{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001625 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001626 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001627
Peter Zijlstra984b8382009-07-10 09:59:56 +02001628 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001629 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001630 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1631
1632 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001633}
1634
1635
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001636static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001637{
Markus Metzger30dd5682009-07-21 15:56:48 +02001638 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001639 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001640 return;
1641
1642 intel_pmu_enable_bts(hwc->config);
1643 return;
1644 }
1645
Robert Richter7c90cc42009-04-29 12:47:18 +02001646 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1647 intel_pmu_enable_fixed(hwc, idx);
1648 return;
1649 }
1650
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001651 x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001652}
1653
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001654static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001655{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001656 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001657
1658 if (cpuc->enabled)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001659 x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001660}
1661
Ingo Molnaree060942008-12-13 09:00:03 +01001662/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001663 * activate a single event
1664 *
1665 * The event is added to the group of enabled events
1666 * but only if it can be scheduled with existing events.
1667 *
1668 * Called with the PMU disabled; if it succeeds (return value 0),
1669 * the caller is then guaranteed to call perf_enable() and hw_perf_enable()
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001670 */
1671static int x86_pmu_enable(struct perf_event *event)
1672{
1673 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001674 struct hw_perf_event *hwc;
1675 int assign[X86_PMC_IDX_MAX];
1676 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001677
Stephane Eranian1da53e02010-01-18 10:58:01 +02001678 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001679
Stephane Eranian1da53e02010-01-18 10:58:01 +02001680 n0 = cpuc->n_events;
1681 n = collect_events(cpuc, event, false);
1682 if (n < 0)
1683 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001684
Stephane Eranian1da53e02010-01-18 10:58:01 +02001685 ret = x86_schedule_events(cpuc, n, assign);
1686 if (ret)
1687 return ret;
1688 /*
1689 * copy the new assignment now that we know it is possible;
1690 * it will be used by hw_perf_enable()
1691 */
1692 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001693
Stephane Eranian1da53e02010-01-18 10:58:01 +02001694 cpuc->n_events = n;
1695 cpuc->n_added = n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001696
Stephane Eranian1da53e02010-01-18 10:58:01 +02001697 if (hwc->idx != -1)
1698 x86_perf_event_set_period(event, hwc, hwc->idx);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001699
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001700 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001701}
1702
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001703static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001704{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001705 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1706 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001707
1708 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001709 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001710 return;
1711
1712 x86_pmu.enable(hwc, hwc->idx);
1713}
1714
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001715void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001716{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001717 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001718 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001719 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001720 int cpu, idx;
1721
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001722 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001723 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001724
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001725 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001726
1727 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001728 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001729
Robert Richterfaa28ae2009-04-29 12:47:13 +02001730 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301731 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1732 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1733 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1734 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001735
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301736 pr_info("\n");
1737 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1738 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1739 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1740 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301741 }
Stephane Eranian1da53e02010-01-18 10:58:01 +02001742 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001743
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001744 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001745 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1746 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001747
Tejun Heo245b2e72009-06-24 15:13:48 +09001748 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001749
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301750 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001751 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301752 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001753 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301754 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001755 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001756 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001757 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001758 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1759
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301760 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001761 cpu, idx, pmc_count);
1762 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001763 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001764}
1765
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001766static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001767{
1768 struct debug_store *ds = cpuc->ds;
1769 struct bts_record {
1770 u64 from;
1771 u64 to;
1772 u64 flags;
1773 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001774 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001775 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001776 struct perf_output_handle handle;
1777 struct perf_event_header header;
1778 struct perf_sample_data data;
1779 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001780
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001781 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001782 return;
1783
1784 if (!ds)
1785 return;
1786
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001787 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1788 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001789
Markus Metzger5622f292009-09-15 13:00:23 +02001790 if (top <= at)
1791 return;
1792
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001793 ds->bts_index = ds->bts_buffer_base;
1794
Markus Metzger30dd5682009-07-21 15:56:48 +02001795
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001796 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001797 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001798 data.raw = NULL;
Markus Metzger5622f292009-09-15 13:00:23 +02001799 regs.ip = 0;
1800
1801 /*
1802 * Prepare a generic sample, i.e. fill in the invariant fields.
1803 * We will overwrite the from and to address before we output
1804 * the sample.
1805 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001806 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001807
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001808 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001809 header.size * (top - at), 1, 1))
1810 return;
1811
1812 for (; at < top; at++) {
1813 data.ip = at->from;
1814 data.addr = at->to;
1815
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001816 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001817 }
1818
Markus Metzger5622f292009-09-15 13:00:23 +02001819 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001820
1821 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001822 event->hw.interrupts++;
1823 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001824}
1825
Peter Zijlstra2e841872010-01-25 15:58:43 +01001826static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
Ingo Molnar241771e2008-12-03 10:39:53 +01001827{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001828 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001829 int idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001830
Robert Richter09534232009-04-29 12:47:16 +02001831 /*
1832 * Must be done before we disable, otherwise the NMI handler
1833 * could re-enable it again:
1834 */
Robert Richter43f62012009-04-29 16:55:56 +02001835 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001836 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001837
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001838 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001839	 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001840 * that we are disabling:
1841 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001842 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001843
1844 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001845 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1846 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001847
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001848 cpuc->events[idx] = NULL;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001849}
1850
1851static void x86_pmu_disable(struct perf_event *event)
1852{
1853 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1854 int i;
1855
1856 __x86_pmu_disable(event, cpuc);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001857
Stephane Eranian1da53e02010-01-18 10:58:01 +02001858 for (i = 0; i < cpuc->n_events; i++) {
1859 if (event == cpuc->event_list[i]) {
1860
1861 if (x86_pmu.put_event_constraints)
1862 x86_pmu.put_event_constraints(cpuc, event);
1863
1864 while (++i < cpuc->n_events)
1865 cpuc->event_list[i-1] = cpuc->event_list[i];
1866
1867 --cpuc->n_events;
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001868 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001869 }
1870 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001871 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001872}
1873
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001874/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001875 * Save and restart an expired event. Called by NMI contexts,
1876 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001877 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001878static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001879{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001880 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001881 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001882 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001883
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001884 x86_perf_event_update(event, hwc, idx);
1885 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001886
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001887 if (event->state == PERF_EVENT_STATE_ACTIVE)
1888 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001889
1890 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001891}
1892
Ingo Molnaraaba9802009-05-26 08:10:00 +02001893static void intel_pmu_reset(void)
1894{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001895 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001896 unsigned long flags;
1897 int idx;
1898
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001899 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02001900 return;
1901
1902 local_irq_save(flags);
1903
1904 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1905
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001906 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001907 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1908 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1909 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001911 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1912 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001913 if (ds)
1914 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001915
1916 local_irq_restore(flags);
1917}
1918
Vince Weaver11d15782009-07-08 17:46:14 -04001919static int p6_pmu_handle_irq(struct pt_regs *regs)
1920{
1921 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001922 struct cpu_hw_events *cpuc;
1923 struct perf_event *event;
1924 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001925 int idx, handled = 0;
1926 u64 val;
1927
Vince Weaver11d15782009-07-08 17:46:14 -04001928 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001929 data.raw = NULL;
Vince Weaver11d15782009-07-08 17:46:14 -04001930
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001931 cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001932
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001933 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Vince Weaver11d15782009-07-08 17:46:14 -04001934 if (!test_bit(idx, cpuc->active_mask))
1935 continue;
1936
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001937 event = cpuc->events[idx];
1938 hwc = &event->hw;
Vince Weaver11d15782009-07-08 17:46:14 -04001939
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001940 val = x86_perf_event_update(event, hwc, idx);
1941 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Vince Weaver11d15782009-07-08 17:46:14 -04001942 continue;
1943
1944 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001945 * event overflow
Vince Weaver11d15782009-07-08 17:46:14 -04001946 */
1947 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001948 data.period = event->hw.last_period;
Vince Weaver11d15782009-07-08 17:46:14 -04001949
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001950 if (!x86_perf_event_set_period(event, hwc, idx))
Vince Weaver11d15782009-07-08 17:46:14 -04001951 continue;
1952
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001953 if (perf_event_overflow(event, 1, &data, regs))
1954 p6_pmu_disable_event(hwc, idx);
Vince Weaver11d15782009-07-08 17:46:14 -04001955 }
1956
1957 if (handled)
1958 inc_irq_stat(apic_perf_irqs);
1959
1960 return handled;
1961}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001962
Ingo Molnar241771e2008-12-03 10:39:53 +01001963/*
1964 * This handler is triggered by the local APIC, so the APIC IRQ handling
1965 * rules apply:
1966 */
Yong Wanga3288102009-06-03 13:12:55 +08001967static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001968{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001969 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001970 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001971 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001972 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001973
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001974 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001975 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001976
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001977 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01001978
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001979 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02001980 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02001981 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001982 if (!status) {
1983 perf_enable();
1984 return 0;
1985 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001986
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001987 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001988again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001989 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001990 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1991 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02001992 intel_pmu_reset();
1993 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001994 return 1;
1995 }
1996
Mike Galbraithd278c482009-02-09 07:38:50 +01001997 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001998 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001999 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002000 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01002001
2002 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02002003 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01002004 continue;
2005
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002006 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002007 continue;
2008
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002009 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02002010
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002011 if (perf_event_overflow(event, 1, &data, regs))
2012 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01002013 }
2014
Robert Richterdee5d902009-04-29 12:47:07 +02002015 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01002016
2017 /*
2018 * Repeat if there is more work to be done:
2019 */
Robert Richter19d84da2009-04-29 12:47:25 +02002020 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01002021 if (status)
2022 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002023
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002024 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002025
2026 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01002027}
2028
Yong Wanga3288102009-06-03 13:12:55 +08002029static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02002030{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002031 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002032 struct cpu_hw_events *cpuc;
2033 struct perf_event *event;
2034 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04002035 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002036 u64 val;
2037
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002038 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002039 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002040
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002041 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002042
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002043 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02002044 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02002045 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002046
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002047 event = cpuc->events[idx];
2048 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002049
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002050 val = x86_perf_event_update(event, hwc, idx);
2051 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002052 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002053
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002054 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002055 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002056 */
2057 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002058 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002059
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002060 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002061 continue;
2062
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002063 if (perf_event_overflow(event, 1, &data, regs))
2064 amd_pmu_disable_event(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002065 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002066
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002067 if (handled)
2068 inc_irq_stat(apic_perf_irqs);
2069
Robert Richtera29aa8a2009-04-29 12:47:21 +02002070 return handled;
2071}
Robert Richter39d81ea2009-04-29 12:47:05 +02002072
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002073void smp_perf_pending_interrupt(struct pt_regs *regs)
2074{
2075 irq_enter();
2076 ack_APIC_irq();
2077 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002078 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002079 irq_exit();
2080}
2081
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002082void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002083{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002084#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02002085 if (!x86_pmu.apic || !x86_pmu_initialized())
2086 return;
2087
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002088 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002089#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002090}
2091
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002092void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002093{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002094#ifdef CONFIG_X86_LOCAL_APIC
2095 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01002096 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02002097
Ingo Molnar241771e2008-12-03 10:39:53 +01002098 /*
Yong Wangc323d952009-05-29 13:28:35 +08002099 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01002100 */
Yong Wangc323d952009-05-29 13:28:35 +08002101 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002102#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01002103}
2104
2105static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002106perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01002107 unsigned long cmd, void *__args)
2108{
2109 struct die_args *args = __args;
2110 struct pt_regs *regs;
2111
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002112 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02002113 return NOTIFY_DONE;
2114
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002115 switch (cmd) {
2116 case DIE_NMI:
2117 case DIE_NMI_IPI:
2118 break;
2119
2120 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01002121 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002122 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002123
2124 regs = args->regs;
2125
Ingo Molnar04da8a42009-08-11 10:40:08 +02002126#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01002127 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002128#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002129 /*
2130 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002131 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002132 *
2133 * If the first NMI handles both, the latter will be empty and daze
2134 * the CPU.
2135 */
Yong Wanga3288102009-06-03 13:12:55 +08002136 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002137
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002138 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01002139}
2140
Peter Zijlstra63b14642010-01-22 16:32:17 +01002141static struct event_constraint unconstrained;
2142
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002143static struct event_constraint bts_constraint =
2144 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
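/*
 * Descriptive note: the index mask above has only the BTS pseudo-counter
 * bit set, so an event carrying this constraint can never be scheduled
 * onto a generic counter; intel_special_constraints() below applies it
 * to branch events sampled with a period of 1.
 */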
Stephane Eranian1da53e02010-01-18 10:58:01 +02002145
Peter Zijlstra63b14642010-01-22 16:32:17 +01002146static struct event_constraint *
2147intel_special_constraints(struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002148{
2149 unsigned int hw_event;
2150
2151 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2152
2153 if (unlikely((hw_event ==
2154 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2155 (event->hw.sample_period == 1))) {
2156
Peter Zijlstra63b14642010-01-22 16:32:17 +01002157 return &bts_constraint;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002158 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002159 return NULL;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002160}
2161
Peter Zijlstra63b14642010-01-22 16:32:17 +01002162static struct event_constraint *
2163intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002164{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002165 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002166
Peter Zijlstra63b14642010-01-22 16:32:17 +01002167 c = intel_special_constraints(event);
2168 if (c)
2169 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002170
2171 if (x86_pmu.event_constraints) {
2172 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01002173 if ((event->hw.config & c->cmask) == c->code)
2174 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002175 }
2176 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002177
2178 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002179}
2180
Peter Zijlstra63b14642010-01-22 16:32:17 +01002181static struct event_constraint *
2182amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002183{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002184 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002185}
2186
2187static int x86_event_sched_in(struct perf_event *event,
2188 struct perf_cpu_context *cpuctx, int cpu)
2189{
2190 int ret = 0;
2191
2192 event->state = PERF_EVENT_STATE_ACTIVE;
2193 event->oncpu = cpu;
2194 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2195
2196 if (!is_x86_event(event))
2197 ret = event->pmu->enable(event);
2198
2199 if (!ret && !is_software_event(event))
2200 cpuctx->active_oncpu++;
2201
2202 if (!ret && event->attr.exclusive)
2203 cpuctx->exclusive = 1;
2204
2205 return ret;
2206}
2207
2208static void x86_event_sched_out(struct perf_event *event,
2209 struct perf_cpu_context *cpuctx, int cpu)
2210{
2211 event->state = PERF_EVENT_STATE_INACTIVE;
2212 event->oncpu = -1;
2213
2214 if (!is_x86_event(event))
2215 event->pmu->disable(event);
2216
2217 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2218
2219 if (!is_software_event(event))
2220 cpuctx->active_oncpu--;
2221
2222 if (event->attr.exclusive || !cpuctx->active_oncpu)
2223 cpuctx->exclusive = 0;
2224}
2225
2226/*
2227 * Called to enable a whole group of events.
2228 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2229 * Assumes the caller has disabled interrupts and has
2230 * frozen the PMU with hw_perf_save_disable.
2231 *
2232 * Called with the PMU disabled; if successful (return value 1),
2233 * the caller is then guaranteed to call perf_enable() and hw_perf_enable()
2234 */
2235int hw_perf_group_sched_in(struct perf_event *leader,
2236 struct perf_cpu_context *cpuctx,
2237 struct perf_event_context *ctx, int cpu)
2238{
2239 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2240 struct perf_event *sub;
2241 int assign[X86_PMC_IDX_MAX];
2242 int n0, n1, ret;
2243
2244 /* n0 = total number of events */
2245 n0 = collect_events(cpuc, leader, true);
2246 if (n0 < 0)
2247 return n0;
2248
2249 ret = x86_schedule_events(cpuc, n0, assign);
2250 if (ret)
2251 return ret;
2252
2253 ret = x86_event_sched_in(leader, cpuctx, cpu);
2254 if (ret)
2255 return ret;
2256
2257 n1 = 1;
2258 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02002259 if (sub->state > PERF_EVENT_STATE_OFF) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02002260 ret = x86_event_sched_in(sub, cpuctx, cpu);
2261 if (ret)
2262 goto undo;
2263 ++n1;
2264 }
2265 }
2266 /*
2267 * copy the new assignment now that we know it is possible;
2268 * it will be used by hw_perf_enable()
2269 */
2270 memcpy(cpuc->assign, assign, n0*sizeof(int));
2271
2272 cpuc->n_events = n0;
2273 cpuc->n_added = n1;
2274 ctx->nr_active += n1;
2275
2276 /*
2277 * 1 means successful and the events are active.
2278 * This is not quite true, because we defer
2279 * actual activation until hw_perf_enable(), but
2280 * this way we ensure the caller won't try to enable
2281 * individual events
2282 */
2283 return 1;
2284undo:
2285 x86_event_sched_out(leader, cpuctx, cpu);
2286 n0 = 1;
2287 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2288 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2289 x86_event_sched_out(sub, cpuctx, cpu);
2290 if (++n0 == n1)
2291 break;
2292 }
2293 }
2294 return ret;
2295}
2296
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002297static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2298 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01002299 .next = NULL,
2300 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01002301};
2302
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002303static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04002304 .name = "p6",
2305 .handle_irq = p6_pmu_handle_irq,
2306 .disable_all = p6_pmu_disable_all,
2307 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002308 .enable = p6_pmu_enable_event,
2309 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04002310 .eventsel = MSR_P6_EVNTSEL0,
2311 .perfctr = MSR_P6_PERFCTR0,
2312 .event_map = p6_pmu_event_map,
2313 .raw_event = p6_pmu_raw_event,
2314 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002315 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04002316 .max_period = (1ULL << 31) - 1,
2317 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002318 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04002319 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002320	 * Events have 40 bits implemented. However, they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04002321	 * that bits [32-39] are sign extensions of bit 31. As such, the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002322	 * effective width of an event for a P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04002323 *
2324 * See IA-32 Intel Architecture Software developer manual Vol 3B
2325 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002326 .event_bits = 32,
2327 .event_mask = (1ULL << 32) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002328 .get_event_constraints = intel_get_event_constraints,
2329 .event_constraints = intel_p6_event_constraints
Vince Weaver11d15782009-07-08 17:46:14 -04002330};
2331
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002332static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002333 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02002334 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002335 .disable_all = intel_pmu_disable_all,
2336 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002337 .enable = intel_pmu_enable_event,
2338 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302339 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2340 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002341 .event_map = intel_pmu_event_map,
2342 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302343 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002344 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002345 /*
2346 * Intel PMCs cannot be accessed sanely above 32 bit width,
2347 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002348 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002349 */
2350 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002351 .enable_bts = intel_pmu_enable_bts,
2352 .disable_bts = intel_pmu_disable_bts,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002353 .get_event_constraints = intel_get_event_constraints
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302354};
2355
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002356static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002357 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02002358 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002359 .disable_all = amd_pmu_disable_all,
2360 .enable_all = amd_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002361 .enable = amd_pmu_enable_event,
2362 .disable = amd_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302363 .eventsel = MSR_K7_EVNTSEL0,
2364 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002365 .event_map = amd_pmu_event_map,
2366 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302367 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002368 .num_events = 4,
2369 .event_bits = 48,
2370 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002371 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002372 /* use highest bit to detect overflow */
2373 .max_period = (1ULL << 47) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002374 .get_event_constraints = amd_get_event_constraints
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302375};
2376
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002377static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002378{
Vince Weaver11d15782009-07-08 17:46:14 -04002379 switch (boot_cpu_data.x86_model) {
2380 case 1:
2381 case 3: /* Pentium Pro */
2382 case 5:
2383 case 6: /* Pentium II */
2384 case 7:
2385 case 8:
2386 case 11: /* Pentium III */
Vince Weaver11d15782009-07-08 17:46:14 -04002387 case 9:
2388 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002389 /* Pentium M */
2390 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002391 default:
2392 pr_cont("unsupported p6 CPU model %d ",
2393 boot_cpu_data.x86_model);
2394 return -ENODEV;
2395 }
2396
Ingo Molnar04da8a42009-08-11 10:40:08 +02002397 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002398
Vince Weaver11d15782009-07-08 17:46:14 -04002399 return 0;
2400}
2401
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002402static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002403{
Ingo Molnar703e9372008-12-17 10:51:15 +01002404 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002405 union cpuid10_eax eax;
2406 unsigned int unused;
2407 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002408 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002409
Vince Weaver11d15782009-07-08 17:46:14 -04002410 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2411 /* check for P6 processor family */
2412 if (boot_cpu_data.x86 == 6) {
2413 return p6_pmu_init();
2414 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002415 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002416 }
2417 }
Robert Richterda1a7762009-04-29 12:46:58 +02002418
Ingo Molnar241771e2008-12-03 10:39:53 +01002419 /*
2420 * Check whether the Architectural PerfMon supports
Ingo Molnardfc65092009-09-21 11:31:35 +02002421	 * the Branch Misses Retired hw_event or not.
Ingo Molnar241771e2008-12-03 10:39:53 +01002422 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002423 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002424 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002425 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002426
Robert Richterfaa28ae2009-04-29 12:47:13 +02002427 version = eax.split.version_id;
2428 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02002429 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002430
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002431 x86_pmu = intel_pmu;
2432 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002433 x86_pmu.num_events = eax.split.num_events;
2434 x86_pmu.event_bits = eax.split.bit_width;
2435 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002436
2437 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002438 * Quirk: v2 perfmon does not report fixed-purpose events, so
2439 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002440 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002441 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302442
Ingo Molnar8326f442009-06-05 20:22:46 +02002443 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002444 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002445 */
2446 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08002447 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2448 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2449 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2450 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002451 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002452 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002453
Stephane Eranian1da53e02010-01-18 10:58:01 +02002454 x86_pmu.event_constraints = intel_core_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002455 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002456 break;
Ingo Molnar8326f442009-06-05 20:22:46 +02002457 case 26:
2458 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002459 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002460
Stephane Eranian1da53e02010-01-18 10:58:01 +02002461 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002462 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002463 break;
2464 case 28:
2465 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002466 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002467
Stephane Eranian1da53e02010-01-18 10:58:01 +02002468 x86_pmu.event_constraints = intel_gen_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002469 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002470 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002471 default:
2472 /*
2473 * default constraints for v2 and up
2474 */
2475 x86_pmu.event_constraints = intel_gen_event_constraints;
2476 pr_cont("generic architected perfmon, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002477 }
Robert Richter72eae042009-04-29 12:47:10 +02002478 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302479}
2480
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002481static __init int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302482{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05302483 /* Performance-monitoring supported from K7 and later: */
2484 if (boot_cpu_data.x86 < 6)
2485 return -ENODEV;
2486
Robert Richter4a06bd82009-04-29 12:47:11 +02002487 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002488
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05302489 /* Events are common for all AMDs */
2490 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2491 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002492
Robert Richter72eae042009-04-29 12:47:10 +02002493 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302494}
2495
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002496static void __init pmu_check_apic(void)
2497{
2498 if (cpu_has_apic)
2499 return;
2500
2501 x86_pmu.apic = 0;
2502 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2503 pr_info("no hardware sampling interrupt available.\n");
2504}
2505
void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:             %016Lx\n", perf_event_mask);
}

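/*
 * Sketch of the counter mask assembled above, using hypothetical
 * Core 2-like numbers (2 generic counters, 3 fixed counters,
 * X86_PMC_IDX_FIXED == 32):
 *
 *	mask  = (1ULL << 2) - 1;		-> 0x0000000000000003
 *	mask |= ((1ULL << 3) - 1) << 32;	-> 0x0000000700000003
 *
 * i.e. generic counters occupy the low bits and the fixed-purpose
 * counters start at bit X86_PMC_IDX_FIXED.
 */
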
static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

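/*
 * Assumed call flow (simplified; the real logic lives in
 * kernel/perf_event.c): the core drives a counter through these
 * callbacks roughly as
 *
 *	err = event->pmu->enable(event);	- claim a hw counter
 *	...
 *	event->pmu->read(event);		- fold hw count into event
 *	event->pmu->disable(event);		- release the counter
 *
 * unthrottle() re-enables an event that interrupt throttling stopped.
 */
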
/*
 * validate a single event group
 *
 * validation includes:
 *	- checking that the events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it were the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * The event is not yet connected with its siblings, therefore
	 * we must first collect the existing siblings and then add the
	 * new event before we can simulate the scheduling.
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

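/*
 * Illustration with hypothetical numbers: on a PMU with two generic
 * counters, creating a third sibling in one group fails the simulated
 * x86_schedule_events() above, so user space gets -ENOSPC up front
 * instead of a group that could never be co-scheduled:
 *
 *	leader:    cycles		-> counter 0
 *	sibling 1: instructions		-> counter 1
 *	sibling 2: branches		-> no counter left, -ENOSPC
 */
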
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * We temporarily connect the event to its pmu such
		 * that validate_group() can classify it as an x86
		 * event using is_x86_event().
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

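/*
 * Note on the pmu swap above: is_x86_event(), earlier in this file,
 * classifies an event simply by comparing pointers, roughly
 *
 *	return event->pmu == &pmu;
 *
 * so the half-initialised event must temporarily point at &pmu for
 * validate_group() to treat it as a hardware event at all.
 */
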
/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

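/*
 * Resulting buffer layout (illustrative): one perf_callchain_entry is
 * a context marker followed by the captured instruction pointers,
 *
 *	entry->ip[0] = PERF_CONTEXT_KERNEL;
 *	entry->ip[1] = regs->ip;
 *	entry->ip[2] = <caller of regs->ip>;
 *	...
 *
 * and callchain_store() silently drops anything beyond
 * PERF_MAX_STACK_DEPTH entries.
 */
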
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

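/*
 * How these callbacks are driven (sketch): dump_trace() consults
 * backtrace_stack() at each stack transition (returning 0 continues
 * the walk) and hands every candidate return address, plus a
 * 'reliable' flag, to backtrace_address(), so only frame-pointer
 * verified addresses land in the callchain. The single consumer is
 * the call in perf_callchain_kernel() below:
 *
 *	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 */
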
#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

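/*
 * Worked example of the chunking above (hypothetical numbers,
 * PAGE_SIZE == 4096): copying n == 24 bytes from addr == 0xfff8:
 *
 *	pass 1: offset == 0xff8, size == min(8, 24)    ==  8, len ==  8
 *	pass 2: offset == 0x000, size == min(4096, 16) == 16, len == 24
 *
 * A read straddling a page boundary is thus split into per-page
 * kmaps, and a fault in __get_user_pages_fast() merely truncates the
 * copy rather than sleeping, which would be fatal in NMI context.
 */
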
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

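/*
 * For reference, the frame fetched above is the classic x86
 * frame-pointer pair (declared in dumpstack.h, included earlier):
 *
 *	struct stack_frame {
 *		struct stack_frame	*next_frame;
 *		unsigned long		return_address;
 *	};
 *
 * i.e. the saved frame pointer followed by the return address, so one
 * copy_from_user_nmi() fetches a complete link of the chain.
 */
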
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame	= NULL;
		frame.return_address	= 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

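/*
 * The loop above is a plain frame-pointer walk; it only works for
 * user code compiled with frame pointers (sketch):
 *
 *	fp -> [ saved fp ]  -- next_frame; must point above regs->sp,
 *	      [ ret addr ]     hence the fp < regs->sp sanity check
 *	           |           against loops and corrupted stacks
 *	           v
 *	      [ saved fp ]
 *	      [ ret addr ]
 *
 * An unreadable frame (copy_stack_frame() failing) ends the walk.
 */
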
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

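/*
 * Why two per-cpu scratch entries: an NMI can hit while an IRQ-context
 * callchain is still being assembled on the same CPU, so each context
 * gets its own buffer (sketch):
 *
 *	IRQ context:	entry = &__get_cpu_var(pmc_irq_entry);
 *	NMI context:	entry = &__get_cpu_var(pmc_nmi_entry);
 *
 * Since NMIs do not nest on top of each other here, one buffer per
 * context is sufficient.
 */
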
void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}