/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)


/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))

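/*
 * Counter-scheduling constraint for an event, as consumed by the event
 * scheduler further down in this file:
 *
 *  idxmsk - bitmask of the hardware counters the event may be placed on
 *  code   - the event code this constraint applies to
 *  cmask  - which bits of the event config are compared against 'code'
 */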
struct event_constraint {
	u64	idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
	int	code;
	int	cmask;
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};

#define EVENT_CONSTRAINT(c, n, m) {	\
	.code = (c),			\
	.cmask = (m),			\
	.idxmsk[0] = (n) }

#define EVENT_CONSTRAINT_END \
	{ .code = 0, .cmask = 0, .idxmsk[0] = 0 }

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->cmask; (e)++)

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);
	void		(*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk);
	void		(*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event);
	const struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint intel_p6_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK),	/* FLOPS */
	EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
	EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
	EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
	EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
	EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
	EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
	EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
	EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
	EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
	EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
	EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
	EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
	EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

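/*
 * Reserve the hardware counters: each perfctr/eventsel MSR pair is
 * claimed through the perfctr-watchdog reservation interface, and the
 * LAPIC NMI watchdog is switched off while the PMU is in use. Any
 * partial reservation is unwound on failure.
 */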
static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

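/*
 * Allocate a BTS buffer and a struct debug_store for every possible
 * CPU, and point the DS area of each online CPU at its debug store.
 * Everything is released again if any allocation fails.
 */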
static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

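/*
 * Decode a PERF_TYPE_HW_CACHE config: byte 0 selects the cache, byte 1
 * the operation and byte 2 the result, which together index the
 * model-specific hw_cache_event_ids[] table filled in at boot.
 */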
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

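/*
 * Start branch tracing by setting the trace/BTS/BTINT bits in the
 * debugctl MSR; kernel or user branches are masked out when the event
 * config excludes OS or USR counting.
 */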
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * The raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		cpuc->n_added = 0;

	x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

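/*
 * Assign the collected events to hardware counters while honouring
 * their constraints. Two passes: a fast path that keeps an event on
 * the counter it already occupies when that is still legal, and a
 * slow path that redistributes everything, placing the most
 * constrained events (smallest counter-bitmask weight) first.
 * Returns 0 on success, -ENOSPC if no legal assignment exists; with a
 * NULL 'assign' it only checks schedulability.
 */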
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	int i, j, w, num;
	int weight, wmax;
	unsigned long *c;
	unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		x86_pmu.get_event_constraints(cpuc,
					      cpuc->event_list[i],
					      constraints[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0, num = n; i < n; i++, num--) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

#if 0
		pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
			 smp_processor_id(),
			 hwc->config,
			 hwc->idx,
			 assign ? 'y' : 'n');
#endif

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (!num)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			weight = bitmap_weight(c, X86_PMC_IDX_MAX);
			if (weight != w)
				continue;

			for_each_bit(j, c, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

#if 0
			pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
				 smp_processor_id(),
				 hwc->config,
				 j,
				 assign ? 'y' : 'n');
#endif

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

1391static inline void x86_assign_hw_event(struct perf_event *event,
1392 struct hw_perf_event *hwc, int idx)
1393{
1394 hwc->idx = idx;
1395
1396 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1397 hwc->config_base = 0;
1398 hwc->event_base = 0;
1399 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1400 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1401 /*
1402 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1403 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1404 */
1405 hwc->event_base =
1406 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1407 } else {
1408 hwc->config_base = x86_pmu.eventsel;
1409 hwc->event_base = x86_pmu.perfctr;
1410 }
1411}
1412
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001413void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +01001414{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001415 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1416 struct perf_event *event;
1417 struct hw_perf_event *hwc;
1418 int i;
1419
Robert Richter85cf9db2009-04-29 12:47:20 +02001420 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +01001421 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001422 if (cpuc->n_added) {
1423 /*
1424 * apply assignment obtained either from
1425 * hw_perf_group_sched_in() or x86_pmu_enable()
1426 *
1427 * step1: save events moving to new counters
1428 * step2: reprogram moved events into new counters
1429 */
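 /* step1: stop events that are moving to a new counter and save their counts */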
1430 for (i = 0; i < cpuc->n_events; i++) {
1431
1432 event = cpuc->event_list[i];
1433 hwc = &event->hw;
1434
1435 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1436 continue;
1437
1438 x86_pmu.disable(hwc, hwc->idx);
1439
1440 clear_bit(hwc->idx, cpuc->active_mask);
1441 barrier();
1442 cpuc->events[hwc->idx] = NULL;
1443
1444 x86_perf_event_update(event, hwc, hwc->idx);
1445
1446 hwc->idx = -1;
1447 }
1448
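 /* step2: program moved/new events on their counters and (re)enable all listed events */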
1449 for (i = 0; i < cpuc->n_events; i++) {
1450
1451 event = cpuc->event_list[i];
1452 hwc = &event->hw;
1453
1454 if (hwc->idx == -1) {
1455 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1456 x86_perf_event_set_period(event, hwc, hwc->idx);
1457 }
1458 /*
1459 * need to mark as active because x86_pmu_disable()
 1460 * clears active_mask and events[] yet it preserves
1461 * idx
1462 */
1463 set_bit(hwc->idx, cpuc->active_mask);
1464 cpuc->events[hwc->idx] = event;
1465
1466 x86_pmu.enable(hwc, hwc->idx);
1467 perf_event_update_userpage(event);
1468 }
1469 cpuc->n_added = 0;
1470 perf_events_lapic_init();
1471 }
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001472 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +01001473}
Ingo Molnaree060942008-12-13 09:00:03 +01001474
Robert Richter19d84da2009-04-29 12:47:25 +02001475static inline u64 intel_pmu_get_status(void)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001476{
1477 u64 status;
1478
1479 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1480
1481 return status;
1482}
1483
Robert Richterdee5d902009-04-29 12:47:07 +02001484static inline void intel_pmu_ack_status(u64 ack)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001485{
1486 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1487}
1488
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001489static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001490{
Vince Weaver11d15782009-07-08 17:46:14 -04001491 (void)checking_wrmsrl(hwc->config_base + idx,
Robert Richter7c90cc42009-04-29 12:47:18 +02001492 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001493}
1494
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001495static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001496{
Vince Weaver11d15782009-07-08 17:46:14 -04001497 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001498}
1499
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001500static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001501intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001502{
1503 int idx = __idx - X86_PMC_IDX_FIXED;
1504 u64 ctrl_val, mask;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001505
1506 mask = 0xfULL << (idx * 4);
1507
1508 rdmsrl(hwc->config_base, ctrl_val);
1509 ctrl_val &= ~mask;
Vince Weaver11d15782009-07-08 17:46:14 -04001510 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1511}
1512
1513static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001514p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001515{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001516 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1517 u64 val = P6_NOP_EVENT;
Vince Weaver11d15782009-07-08 17:46:14 -04001518
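 /*
  * P6-family counters share one global enable bit (on EVNTSEL0), so an
  * event is "disabled" by programming a NOP event in its place while
  * keeping the enable bit in the state the rest of the PMU expects.
  */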
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001519 if (cpuc->enabled)
1520 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
Vince Weaver11d15782009-07-08 17:46:14 -04001521
1522 (void)checking_wrmsrl(hwc->config_base + idx, val);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001523}
1524
1525static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001526intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001527{
Markus Metzger30dd5682009-07-21 15:56:48 +02001528 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1529 intel_pmu_disable_bts();
1530 return;
1531 }
1532
Robert Richterd4369892009-04-29 12:47:19 +02001533 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1534 intel_pmu_disable_fixed(hwc, idx);
1535 return;
1536 }
1537
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001538 x86_pmu_disable_event(hwc, idx);
Robert Richterd4369892009-04-29 12:47:19 +02001539}
1540
1541static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001542amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Robert Richterd4369892009-04-29 12:47:19 +02001543{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001544 x86_pmu_disable_event(hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001545}
1546
Tejun Heo245b2e72009-06-24 15:13:48 +09001547static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001548
Ingo Molnaree060942008-12-13 09:00:03 +01001549/*
1550 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001551 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +01001552 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001553static int
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001554x86_perf_event_set_period(struct perf_event *event,
1555 struct hw_perf_event *hwc, int idx)
Ingo Molnar241771e2008-12-03 10:39:53 +01001556{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001557 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001558 s64 period = hwc->sample_period;
1559 int err, ret = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001560
Markus Metzger30dd5682009-07-21 15:56:48 +02001561 if (idx == X86_PMC_IDX_FIXED_BTS)
1562 return 0;
1563
Ingo Molnaree060942008-12-13 09:00:03 +01001564 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001565 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +01001566 */
1567 if (unlikely(left <= -period)) {
1568 left = period;
1569 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001570 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001571 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001572 }
1573
1574 if (unlikely(left <= 0)) {
1575 left += period;
1576 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001577 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001578 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001579 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001580 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001581 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001582 */
1583 if (unlikely(left < 2))
1584 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +01001585
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001586 if (left > x86_pmu.max_period)
1587 left = x86_pmu.max_period;
1588
Tejun Heo245b2e72009-06-24 15:13:48 +09001589 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +01001590
1591 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001592 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +01001593 * mark it to be able to extract future deltas:
1594 */
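 /*
  * For example, with a 32-bit wide counter and left == 1000, the value
  * written below is 2^32 - 1000, so the counter overflows after 1000
  * increments.
  */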
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001595 atomic64_set(&hwc->prev_count, (u64)-left);
Ingo Molnaree060942008-12-13 09:00:03 +01001596
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597 err = checking_wrmsrl(hwc->event_base + idx,
1598 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001599
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001600 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001601
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001602 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001603}
1604
1605static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001606intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001607{
1608 int idx = __idx - X86_PMC_IDX_FIXED;
1609 u64 ctrl_val, bits, mask;
1610 int err;
1611
1612 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001613 * Enable IRQ generation (0x8),
1614 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1615 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001616 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001617 bits = 0x8ULL;
1618 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1619 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001620 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1621 bits |= 0x1;
1622 bits <<= (idx * 4);
1623 mask = 0xfULL << (idx * 4);
1624
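 /* each fixed counter owns a 4-bit field in the control MSR, e.g. counter 1 uses bits 7:4 */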
1625 rdmsrl(hwc->config_base, ctrl_val);
1626 ctrl_val &= ~mask;
1627 ctrl_val |= bits;
1628 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001629}
1630
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001631static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001632{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001633 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001634 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001635
Peter Zijlstra984b8382009-07-10 09:59:56 +02001636 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001637 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001638 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1639
1640 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001641}
1642
1643
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001644static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001645{
Markus Metzger30dd5682009-07-21 15:56:48 +02001646 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001647 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001648 return;
1649
1650 intel_pmu_enable_bts(hwc->config);
1651 return;
1652 }
1653
Robert Richter7c90cc42009-04-29 12:47:18 +02001654 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1655 intel_pmu_enable_fixed(hwc, idx);
1656 return;
1657 }
1658
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001659 x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001660}
1661
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001662static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001663{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001664 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001665
1666 if (cpuc->enabled)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001667 x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001668}
1669
Ingo Molnaree060942008-12-13 09:00:03 +01001670/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001671 * activate a single event
1672 *
1673 * The event is added to the group of enabled events
 1674 * but only if it can be scheduled with existing events.
1675 *
 1676 * Called with the PMU disabled. On success (return value 0), the caller
 1677 * is guaranteed to eventually call perf_enable() and hw_perf_enable()
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001678 */
1679static int x86_pmu_enable(struct perf_event *event)
1680{
1681 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001682 struct hw_perf_event *hwc;
1683 int assign[X86_PMC_IDX_MAX];
1684 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001685
Stephane Eranian1da53e02010-01-18 10:58:01 +02001686 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001687
Stephane Eranian1da53e02010-01-18 10:58:01 +02001688 n0 = cpuc->n_events;
1689 n = collect_events(cpuc, event, false);
1690 if (n < 0)
1691 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001692
Stephane Eranian1da53e02010-01-18 10:58:01 +02001693 ret = x86_schedule_events(cpuc, n, assign);
1694 if (ret)
1695 return ret;
1696 /*
1697 * copy new assignment, now we know it is possible
1698 * will be used by hw_perf_enable()
1699 */
1700 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001701
Stephane Eranian1da53e02010-01-18 10:58:01 +02001702 cpuc->n_events = n;
1703 cpuc->n_added = n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001704
Stephane Eranian1da53e02010-01-18 10:58:01 +02001705 if (hwc->idx != -1)
1706 x86_perf_event_set_period(event, hwc, hwc->idx);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001707
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001708 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001709}
1710
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001711static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001712{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001713 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1714 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001715
1716 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001717 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001718 return;
1719
1720 x86_pmu.enable(hwc, hwc->idx);
1721}
1722
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001723void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001724{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001725 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001726 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001727 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001728 int cpu, idx;
1729
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001730 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001731 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001732
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001733 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001734
1735 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001736 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001737
Robert Richterfaa28ae2009-04-29 12:47:13 +02001738 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301739 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1740 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1741 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1742 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001743
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301744 pr_info("\n");
1745 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1746 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1747 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1748 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301749 }
Stephane Eranian1da53e02010-01-18 10:58:01 +02001750 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001751
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001752 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001753 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1754 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001755
Tejun Heo245b2e72009-06-24 15:13:48 +09001756 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001757
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301758 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001759 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301760 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001761 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301762 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001763 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001764 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001765 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001766 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1767
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301768 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001769 cpu, idx, pmc_count);
1770 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001771 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001772}
1773
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001774static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001775{
1776 struct debug_store *ds = cpuc->ds;
1777 struct bts_record {
1778 u64 from;
1779 u64 to;
1780 u64 flags;
1781 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001782 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001783 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001784 struct perf_output_handle handle;
1785 struct perf_event_header header;
1786 struct perf_sample_data data;
1787 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001788
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001789 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001790 return;
1791
1792 if (!ds)
1793 return;
1794
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001795 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1796 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001797
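 /* [at, top) spans the BTS records collected since the last drain */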
Markus Metzger5622f292009-09-15 13:00:23 +02001798 if (top <= at)
1799 return;
1800
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001801 ds->bts_index = ds->bts_buffer_base;
1802
Markus Metzger30dd5682009-07-21 15:56:48 +02001803
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001804 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001805 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001806 data.raw = NULL;
Markus Metzger5622f292009-09-15 13:00:23 +02001807 regs.ip = 0;
1808
1809 /*
1810 * Prepare a generic sample, i.e. fill in the invariant fields.
1811 * We will overwrite the from and to address before we output
1812 * the sample.
1813 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001814 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001815
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001816 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001817 header.size * (top - at), 1, 1))
1818 return;
1819
1820 for (; at < top; at++) {
1821 data.ip = at->from;
1822 data.addr = at->to;
1823
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001824 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001825 }
1826
Markus Metzger5622f292009-09-15 13:00:23 +02001827 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001828
1829 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001830 event->hw.interrupts++;
1831 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001832}
1833
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001834static void x86_pmu_disable(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001835{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001836 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1837 struct hw_perf_event *hwc = &event->hw;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001838 int i, idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001839
Robert Richter09534232009-04-29 12:47:16 +02001840 /*
1841 * Must be done before we disable, otherwise the nmi handler
1842 * could reenable again:
1843 */
Robert Richter43f62012009-04-29 16:55:56 +02001844 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001845 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001846
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001847 /*
1848 * Make sure the cleared pointer becomes visible before we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001849 * (potentially) free the event:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001850 */
Robert Richter527e26a2009-04-29 12:47:02 +02001851 barrier();
Ingo Molnar241771e2008-12-03 10:39:53 +01001852
Ingo Molnaree060942008-12-13 09:00:03 +01001853 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001854 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001855 * that we are disabling:
1856 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001857 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001858
1859 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001860 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1861 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001862
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001863 cpuc->events[idx] = NULL;
Peter Zijlstra194002b2009-06-22 16:35:24 +02001864
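 /* remove the event from the scheduler's list and compact the remaining entries */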
Stephane Eranian1da53e02010-01-18 10:58:01 +02001865 for (i = 0; i < cpuc->n_events; i++) {
1866 if (event == cpuc->event_list[i]) {
1867
1868 if (x86_pmu.put_event_constraints)
1869 x86_pmu.put_event_constraints(cpuc, event);
1870
1871 while (++i < cpuc->n_events)
1872 cpuc->event_list[i-1] = cpuc->event_list[i];
1873
1874 --cpuc->n_events;
1875 }
1876 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001877 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001878}
1879
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001880/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001881 * Save and restart an expired event. Called by NMI contexts,
1882 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001883 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001884static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001885{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001886 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001887 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001888 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001889
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001890 x86_perf_event_update(event, hwc, idx);
1891 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001892
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001893 if (event->state == PERF_EVENT_STATE_ACTIVE)
1894 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001895
1896 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001897}
1898
Ingo Molnaraaba9802009-05-26 08:10:00 +02001899static void intel_pmu_reset(void)
1900{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001901 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001902 unsigned long flags;
1903 int idx;
1904
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001905 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02001906 return;
1907
1908 local_irq_save(flags);
1909
1910 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1911
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001912 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001913 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1914 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1915 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001916 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02001917 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1918 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001919 if (ds)
1920 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02001921
1922 local_irq_restore(flags);
1923}
1924
Vince Weaver11d15782009-07-08 17:46:14 -04001925static int p6_pmu_handle_irq(struct pt_regs *regs)
1926{
1927 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001928 struct cpu_hw_events *cpuc;
1929 struct perf_event *event;
1930 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001931 int idx, handled = 0;
1932 u64 val;
1933
Vince Weaver11d15782009-07-08 17:46:14 -04001934 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001935 data.raw = NULL;
Vince Weaver11d15782009-07-08 17:46:14 -04001936
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001937 cpuc = &__get_cpu_var(cpu_hw_events);
Vince Weaver11d15782009-07-08 17:46:14 -04001938
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001939 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Vince Weaver11d15782009-07-08 17:46:14 -04001940 if (!test_bit(idx, cpuc->active_mask))
1941 continue;
1942
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001943 event = cpuc->events[idx];
1944 hwc = &event->hw;
Vince Weaver11d15782009-07-08 17:46:14 -04001945
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001946 val = x86_perf_event_update(event, hwc, idx);
1947 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Vince Weaver11d15782009-07-08 17:46:14 -04001948 continue;
1949
1950 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001951 * event overflow
Vince Weaver11d15782009-07-08 17:46:14 -04001952 */
1953 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001954 data.period = event->hw.last_period;
Vince Weaver11d15782009-07-08 17:46:14 -04001955
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001956 if (!x86_perf_event_set_period(event, hwc, idx))
Vince Weaver11d15782009-07-08 17:46:14 -04001957 continue;
1958
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001959 if (perf_event_overflow(event, 1, &data, regs))
1960 p6_pmu_disable_event(hwc, idx);
Vince Weaver11d15782009-07-08 17:46:14 -04001961 }
1962
1963 if (handled)
1964 inc_irq_stat(apic_perf_irqs);
1965
1966 return handled;
1967}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001968
Ingo Molnar241771e2008-12-03 10:39:53 +01001969/*
1970 * This handler is triggered by the local APIC, so the APIC IRQ handling
1971 * rules apply:
1972 */
Yong Wanga3288102009-06-03 13:12:55 +08001973static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001974{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001975 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001976 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001977 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001978 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001979
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001980 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001981 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001982
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001983 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01001984
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001985 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02001986 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02001987 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001988 if (!status) {
1989 perf_enable();
1990 return 0;
1991 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001992
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001993 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001994again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001995 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001996 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1997 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02001998 intel_pmu_reset();
1999 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002000 return 1;
2001 }
2002
Mike Galbraithd278c482009-02-09 07:38:50 +01002003 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002004 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01002005 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002006 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01002007
2008 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02002009 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01002010 continue;
2011
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002012 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002013 continue;
2014
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002015 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02002016
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002017 if (perf_event_overflow(event, 1, &data, regs))
2018 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01002019 }
2020
Robert Richterdee5d902009-04-29 12:47:07 +02002021 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01002022
2023 /*
2024 * Repeat if there is more work to be done:
2025 */
Robert Richter19d84da2009-04-29 12:47:25 +02002026 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01002027 if (status)
2028 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002029
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002030 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002031
2032 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01002033}
2034
Yong Wanga3288102009-06-03 13:12:55 +08002035static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02002036{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002037 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002038 struct cpu_hw_events *cpuc;
2039 struct perf_event *event;
2040 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04002041 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002042 u64 val;
2043
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002044 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002045 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002046
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002047 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002048
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002049 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02002050 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02002051 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002052
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002053 event = cpuc->events[idx];
2054 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002055
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002056 val = x86_perf_event_update(event, hwc, idx);
2057 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002058 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002059
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002060 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002062 */
2063 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002064 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002065
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002066 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002067 continue;
2068
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002069 if (perf_event_overflow(event, 1, &data, regs))
2070 amd_pmu_disable_event(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002071 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002072
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002073 if (handled)
2074 inc_irq_stat(apic_perf_irqs);
2075
Robert Richtera29aa8a2009-04-29 12:47:21 +02002076 return handled;
2077}
Robert Richter39d81ea2009-04-29 12:47:05 +02002078
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002079void smp_perf_pending_interrupt(struct pt_regs *regs)
2080{
2081 irq_enter();
2082 ack_APIC_irq();
2083 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002084 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002085 irq_exit();
2086}
2087
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002088void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002089{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002090#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02002091 if (!x86_pmu.apic || !x86_pmu_initialized())
2092 return;
2093
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002094 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002095#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002096}
2097
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002098void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002099{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002100#ifdef CONFIG_X86_LOCAL_APIC
2101 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01002102 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02002103
Ingo Molnar241771e2008-12-03 10:39:53 +01002104 /*
Yong Wangc323d952009-05-29 13:28:35 +08002105 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01002106 */
Yong Wangc323d952009-05-29 13:28:35 +08002107 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002108#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01002109}
2110
2111static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002112perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01002113 unsigned long cmd, void *__args)
2114{
2115 struct die_args *args = __args;
2116 struct pt_regs *regs;
2117
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002118 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02002119 return NOTIFY_DONE;
2120
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002121 switch (cmd) {
2122 case DIE_NMI:
2123 case DIE_NMI_IPI:
2124 break;
2125
2126 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01002127 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002128 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002129
2130 regs = args->regs;
2131
Ingo Molnar04da8a42009-08-11 10:40:08 +02002132#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01002133 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002134#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002135 /*
2136 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002137 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002138 *
2139 * If the first NMI handles both, the latter will be empty and daze
2140 * the CPU.
2141 */
Yong Wanga3288102009-06-03 13:12:55 +08002142 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002143
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002144 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01002145}
2146
Stephane Eranian1da53e02010-01-18 10:58:01 +02002147static struct event_constraint bts_constraint = {
2148 .code = 0,
2149 .cmask = 0,
2150 .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
2151};
2152
2153static int intel_special_constraints(struct perf_event *event,
2154 u64 *idxmsk)
2155{
2156 unsigned int hw_event;
2157
2158 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2159
2160 if (unlikely((hw_event ==
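 /*
  * A branch-instructions event with sample_period == 1 is the BTS case
  * and can only run on the BTS fixed slot.
  */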
2161 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2162 (event->hw.sample_period == 1))) {
2163
2164 bitmap_copy((unsigned long *)idxmsk,
2165 (unsigned long *)bts_constraint.idxmsk,
2166 X86_PMC_IDX_MAX);
2167 return 1;
2168 }
2169 return 0;
2170}
2171
2172static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2173 struct perf_event *event,
2174 u64 *idxmsk)
2175{
2176 const struct event_constraint *c;
2177
2178 /*
2179 * cleanup bitmask
2180 */
2181 bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);
2182
2183 if (intel_special_constraints(event, idxmsk))
2184 return;
2185
2186 if (x86_pmu.event_constraints) {
2187 for_each_event_constraint(c, x86_pmu.event_constraints) {
2188 if ((event->hw.config & c->cmask) == c->code) {
2189
2190 bitmap_copy((unsigned long *)idxmsk,
2191 (unsigned long *)c->idxmsk,
2192 X86_PMC_IDX_MAX);
2193 return;
2194 }
2195 }
2196 }
 2197 /* no constraints: the event supports all generic counters */
2198 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2199}
2200
2201static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2202 struct perf_event *event,
2203 u64 *idxmsk)
2204{
Stephane Eranian81130702010-01-21 17:39:01 +02002205 /* no constraints: the event supports all generic counters */
2206 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002207}
2208
2209static int x86_event_sched_in(struct perf_event *event,
2210 struct perf_cpu_context *cpuctx, int cpu)
2211{
2212 int ret = 0;
2213
2214 event->state = PERF_EVENT_STATE_ACTIVE;
2215 event->oncpu = cpu;
2216 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2217
2218 if (!is_x86_event(event))
2219 ret = event->pmu->enable(event);
2220
2221 if (!ret && !is_software_event(event))
2222 cpuctx->active_oncpu++;
2223
2224 if (!ret && event->attr.exclusive)
2225 cpuctx->exclusive = 1;
2226
2227 return ret;
2228}
2229
2230static void x86_event_sched_out(struct perf_event *event,
2231 struct perf_cpu_context *cpuctx, int cpu)
2232{
2233 event->state = PERF_EVENT_STATE_INACTIVE;
2234 event->oncpu = -1;
2235
2236 if (!is_x86_event(event))
2237 event->pmu->disable(event);
2238
2239 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2240
2241 if (!is_software_event(event))
2242 cpuctx->active_oncpu--;
2243
2244 if (event->attr.exclusive || !cpuctx->active_oncpu)
2245 cpuctx->exclusive = 0;
2246}
2247
2248/*
2249 * Called to enable a whole group of events.
2250 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2251 * Assumes the caller has disabled interrupts and has
2252 * frozen the PMU with hw_perf_save_disable.
2253 *
 2254 * Called with the PMU disabled. If successful (return value 1), the
 2255 * caller is then guaranteed to call perf_enable() and hw_perf_enable()
2256 */
2257int hw_perf_group_sched_in(struct perf_event *leader,
2258 struct perf_cpu_context *cpuctx,
2259 struct perf_event_context *ctx, int cpu)
2260{
2261 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2262 struct perf_event *sub;
2263 int assign[X86_PMC_IDX_MAX];
2264 int n0, n1, ret;
2265
2266 /* n0 = total number of events */
2267 n0 = collect_events(cpuc, leader, true);
2268 if (n0 < 0)
2269 return n0;
2270
2271 ret = x86_schedule_events(cpuc, n0, assign);
2272 if (ret)
2273 return ret;
2274
2275 ret = x86_event_sched_in(leader, cpuctx, cpu);
2276 if (ret)
2277 return ret;
2278
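 /* n1 counts the leader plus every sibling successfully scheduled in */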
2279 n1 = 1;
2280 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02002281 if (sub->state > PERF_EVENT_STATE_OFF) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02002282 ret = x86_event_sched_in(sub, cpuctx, cpu);
2283 if (ret)
2284 goto undo;
2285 ++n1;
2286 }
2287 }
2288 /*
2289 * copy new assignment, now we know it is possible
2290 * will be used by hw_perf_enable()
2291 */
2292 memcpy(cpuc->assign, assign, n0*sizeof(int));
2293
2294 cpuc->n_events = n0;
2295 cpuc->n_added = n1;
2296 ctx->nr_active += n1;
2297
2298 /*
 2299 * 1 means successful and events are active.
 2300 * This is not quite true because we defer
 2301 * actual activation until hw_perf_enable(), but
 2302 * this way we ensure the caller won't try to enable
2303 * individual events
2304 */
2305 return 1;
2306undo:
2307 x86_event_sched_out(leader, cpuctx, cpu);
2308 n0 = 1;
2309 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2310 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2311 x86_event_sched_out(sub, cpuctx, cpu);
2312 if (++n0 == n1)
2313 break;
2314 }
2315 }
2316 return ret;
2317}
2318
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002319static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2320 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01002321 .next = NULL,
2322 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01002323};
2324
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002325static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04002326 .name = "p6",
2327 .handle_irq = p6_pmu_handle_irq,
2328 .disable_all = p6_pmu_disable_all,
2329 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002330 .enable = p6_pmu_enable_event,
2331 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04002332 .eventsel = MSR_P6_EVNTSEL0,
2333 .perfctr = MSR_P6_PERFCTR0,
2334 .event_map = p6_pmu_event_map,
2335 .raw_event = p6_pmu_raw_event,
2336 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002337 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04002338 .max_period = (1ULL << 31) - 1,
2339 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002340 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04002341 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002342 * Events have 40 bits implemented. However they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04002343 * that bits [32-39] are sign extensions of bit 31. As such the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002344 * effective width of an event for a P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04002345 *
2346 * See IA-32 Intel Architecture Software developer manual Vol 3B
2347 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002348 .event_bits = 32,
2349 .event_mask = (1ULL << 32) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002350 .get_event_constraints = intel_get_event_constraints,
2351 .event_constraints = intel_p6_event_constraints
Vince Weaver11d15782009-07-08 17:46:14 -04002352};
2353
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002354static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002355 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02002356 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002357 .disable_all = intel_pmu_disable_all,
2358 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002359 .enable = intel_pmu_enable_event,
2360 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302361 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2362 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002363 .event_map = intel_pmu_event_map,
2364 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302365 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002366 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002367 /*
2368 * Intel PMCs cannot be accessed sanely above 32 bit width,
2369 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002370 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002371 */
2372 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002373 .enable_bts = intel_pmu_enable_bts,
2374 .disable_bts = intel_pmu_disable_bts,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002375 .get_event_constraints = intel_get_event_constraints
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302376};
2377
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002378static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002379 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02002380 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002381 .disable_all = amd_pmu_disable_all,
2382 .enable_all = amd_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002383 .enable = amd_pmu_enable_event,
2384 .disable = amd_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302385 .eventsel = MSR_K7_EVNTSEL0,
2386 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002387 .event_map = amd_pmu_event_map,
2388 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302389 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002390 .num_events = 4,
2391 .event_bits = 48,
2392 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002393 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002394 /* use highest bit to detect overflow */
2395 .max_period = (1ULL << 47) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002396 .get_event_constraints = amd_get_event_constraints
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302397};
2398
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002399static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002400{
Vince Weaver11d15782009-07-08 17:46:14 -04002401 switch (boot_cpu_data.x86_model) {
2402 case 1:
2403 case 3: /* Pentium Pro */
2404 case 5:
2405 case 6: /* Pentium II */
2406 case 7:
2407 case 8:
2408 case 11: /* Pentium III */
Vince Weaver11d15782009-07-08 17:46:14 -04002409 case 9:
2410 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002411 /* Pentium M */
2412 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002413 default:
2414 pr_cont("unsupported p6 CPU model %d ",
2415 boot_cpu_data.x86_model);
2416 return -ENODEV;
2417 }
2418
Ingo Molnar04da8a42009-08-11 10:40:08 +02002419 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002420
Vince Weaver11d15782009-07-08 17:46:14 -04002421 return 0;
2422}
2423
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002424static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002425{
Ingo Molnar703e9372008-12-17 10:51:15 +01002426 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002427 union cpuid10_eax eax;
2428 unsigned int unused;
2429 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002430 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002431
Vince Weaver11d15782009-07-08 17:46:14 -04002432 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2433 /* check for P6 processor family */
2434 if (boot_cpu_data.x86 == 6) {
2435 return p6_pmu_init();
2436 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002437 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002438 }
2439 }
Robert Richterda1a7762009-04-29 12:46:58 +02002440
Ingo Molnar241771e2008-12-03 10:39:53 +01002441 /*
2442 * Check whether the Architectural PerfMon supports
Ingo Molnardfc65092009-09-21 11:31:35 +02002443 * Branch Misses Retired hw_event or not.
Ingo Molnar241771e2008-12-03 10:39:53 +01002444 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002445 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002446 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002447 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002448
Robert Richterfaa28ae2009-04-29 12:47:13 +02002449 version = eax.split.version_id;
2450 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02002451 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002452
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002453 x86_pmu = intel_pmu;
2454 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002455 x86_pmu.num_events = eax.split.num_events;
2456 x86_pmu.event_bits = eax.split.bit_width;
2457 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002458
2459 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002460 * Quirk: v2 perfmon does not report fixed-purpose events, so
2461 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002462 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002463 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302464
Ingo Molnar8326f442009-06-05 20:22:46 +02002465 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002466 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002467 */
2468 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08002469 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2470 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2471 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2472 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002473 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002474 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002475
Stephane Eranian1da53e02010-01-18 10:58:01 +02002476 x86_pmu.event_constraints = intel_core_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002477 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002478 break;
Ingo Molnar8326f442009-06-05 20:22:46 +02002479 case 26:
2480 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002481 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002482
Stephane Eranian1da53e02010-01-18 10:58:01 +02002483 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002484 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002485 break;
2486 case 28:
2487 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002488 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002489
Stephane Eranian1da53e02010-01-18 10:58:01 +02002490 x86_pmu.event_constraints = intel_gen_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002491 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002492 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002493 default:
2494 /*
2495 * default constraints for v2 and up
2496 */
2497 x86_pmu.event_constraints = intel_gen_event_constraints;
2498 pr_cont("generic architected perfmon, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002499 }
Robert Richter72eae042009-04-29 12:47:10 +02002500 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302501}
2502
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002503static __init int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302504{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05302505 /* Performance-monitoring supported from K7 and later: */
2506 if (boot_cpu_data.x86 < 6)
2507 return -ENODEV;
2508
Robert Richter4a06bd82009-04-29 12:47:11 +02002509 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002510
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05302511 /* Events are common for all AMDs */
2512 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2513 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002514
Robert Richter72eae042009-04-29 12:47:10 +02002515 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302516}
2517
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002518static void __init pmu_check_apic(void)
2519{
2520 if (cpu_has_apic)
2521 return;
2522
2523 x86_pmu.apic = 0;
2524 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2525 pr_info("no hardware sampling interrupt available.\n");
2526}

void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	pr_info("... version:                 %d\n",     x86_pmu.version);
	pr_info("... bit width:               %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:       %d\n",     x86_pmu.num_events);
	pr_info("... value mask:              %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:              %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:    %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:              %016Lx\n", perf_event_mask);
}
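
/*
 * Worked example, added for illustration only (not referenced by the code):
 * assuming a PMU that reports 4 generic and 3 fixed counters, and assuming
 * X86_PMC_IDX_FIXED is bit 32, init_hw_perf_events() above ends up with:
 *
 *	generic bits:	  (1   << 4) - 1		= 0x00000000f
 *	fixed bits:	(((1LL << 3) - 1) << 32)	= 0x700000000
 *	perf_event_mask:  generic | fixed		= 0x70000000f
 *
 * i.e. one bit per usable counter, generic counters in the low bits and
 * fixed-purpose counters starting at X86_PMC_IDX_FIXED.
 */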

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};
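
/*
 * Minimal sketch, added for illustration only: roughly how the generic perf
 * core is assumed to drive the ops above in this kernel version (->enable()
 * returns 0 when the event was scheduled onto a counter, the other callbacks
 * return void):
 *
 *	if (!event->pmu->enable(event)) {	// claim a counter
 *		...
 *		event->pmu->read(event);	// fold the hw count into the event
 *		...
 *		event->pmu->disable(event);	// give the counter back
 *	}
 */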

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 * (see the illustrative sketch after this function)
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * the event is not yet connected to its siblings,
	 * therefore we must first collect the existing siblings,
	 * then add the new event before we can simulate
	 * the scheduling
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}
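
/*
 * Illustrative sketch, not part of the kernel build: what validate_group()
 * catches. On a hypothetical PMU with only two generic counters, a group of
 * three generic events can never be co-scheduled, so the last
 * perf_event_open() for such a group must fail up front instead of creating
 * a group that would silently never count:
 *
 *	n = collect_events(fake_cpuc, leader, true);	// two existing siblings
 *	n = collect_events(fake_cpuc, event, false);	// three with the new one
 *	ret = x86_schedule_events(fake_cpuc, 3, NULL);	// no assignment fits
 *							// => -ENOSPC to the caller
 */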

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect the event to its pmu
		 * so that validate_group() can classify it
		 * as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context;
 * the regular copy_from_user() may fault and sleep, which is not allowed
 * here, so pin and map the user pages by hand instead.
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}
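
/*
 * For reference only, not part of the original file: copy_stack_frame()
 * relies on the conventional x86 frame layout. The struct stack_frame it
 * fills (its definition is assumed here from the dumpstack code and shown
 * purely as an illustration) is a pair of words sitting at each saved
 * frame pointer:
 *
 *	struct stack_frame {
 *		struct stack_frame	*next_frame;	// saved caller frame pointer
 *		unsigned long		return_address;	// caller's return ip
 *	};
 *
 * so following ->next_frame walks the user stack one call frame at a time,
 * which is what perf_callchain_user() below does.
 */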

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
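
/*
 * Example of the resulting layout, added for illustration only (values are
 * made up): for a sample taken in the kernel on behalf of a task that has a
 * user mm, perf_do_callchain() leaves the entry looking roughly like
 *
 *	entry->ip[] = { PERF_CONTEXT_KERNEL, kernel ip, kernel callers ...,
 *			PERF_CONTEXT_USER,   user ip,   user return addresses ... }
 *
 * with the two context markers letting the tooling split kernel and user
 * frames, and entry->nr capped at PERF_MAX_STACK_DEPTH by callchain_store().
 */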

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}