/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
			(unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};

struct amd_nb {
	int nb_id;	/* NorthBridge id */
	int refcnt;	/* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

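/*
 * Rough sketch of how the PEBS half of the DS area gets wired up by the
 * allocation code in the .c files (the buffer size and threshold policy
 * shown here are illustrative, not the exact implementation):
 *
 *	max = buffer_size / x86_pmu.pebs_record_size;
 *	ds->pebs_buffer_base	  = (u64)(unsigned long)buffer;
 *	ds->pebs_index		  = ds->pebs_buffer_base;
 *	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
 *				    max * x86_pmu.pebs_record_size;
 */
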
/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online;
};

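/*
 * Sketch of typical access from the .c side (not something defined by this
 * header): the fast paths operate on the per-CPU instance declared further
 * down, e.g.
 *
 *	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 */
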
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)

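/*
 * Illustrative sketch (not a table defined by this header) of how a
 * model-specific driver strings these macros together; the event codes
 * and counter masks below are examples only.  Such a table is terminated
 * by EVENT_CONSTRAINT_END and walked with for_each_event_constraint().
 */
#if 0
static struct event_constraint example_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* example: counters 0-1 only */
	EVENT_CONSTRAINT_END
};
#endif
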
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i		\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

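/*
 * Illustrative sketch (not defined here): a model-specific extra-reg table
 * mapping the two OFFCORE_RESPONSE events to their MSRs.  The event codes
 * are the usual NHM/WSM ones; the valid mask is a placeholder value.
 */
#if 0
static struct extra_reg example_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffffull, RSP_1),
	EVENT_EXTRA_END
};
#endif
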
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
	};
	u64	capabilities;
};

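/*
 * On Intel these bits are populated from the IA32_PERF_CAPABILITIES MSR
 * during init, roughly (a sketch):
 *
 *	u64 capabilities;
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *	x86_pmu.intel_cap.capabilities = capabilities;
 */
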
struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

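/*
 * Example (a sketch; the values are illustrative): X86_CONFIG() lets a
 * driver build a raw event config without open-coding the bit layout,
 * e.g. "retired instructions, counter-mask 16, inverted":
 *
 *	event->hw.config = X86_CONFIG(.event = 0xc0, .umask = 0x01,
 *				      .inv = 1, .cmask = 16);
 */
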
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*flush_branch_stack)(void);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int er_flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

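/*
 * Illustrative sketch of a (hypothetical) model description built from the
 * generic helpers declared below; the real instances live in the per-vendor
 * .c files and fill in many more fields:
 */
#if 0
static __initconst const struct x86_pmu example_pmu = {
	.name			= "example",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.max_period		= (1ULL << 31) - 1,
};
#endif
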
#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

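/*
 * Usage sketch (the quirk function named here is hypothetical): a model
 * init path queues a fixup with
 *
 *	x86_add_quirk(example_counter_quirk);
 *
 * and the generic init code runs all queued quirks once afterwards.
 */
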
#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

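/*
 * Illustrative sketch (the attribute set below is an example, not what this
 * header exports): events appear in sysfs by listing EVENT_PTR() entries in
 * an attribute array that is hung off x86_pmu.cpu_events.
 */
#if 0
EVENT_ATTR(cpu-cycles,		CPU_CYCLES);
EVENT_ATTR(instructions,	INSTRUCTIONS);

static struct attribute *example_events_attrs[] = {
	EVENT_PTR(CPU_CYCLES),
	EVENT_PTR(INSTRUCTIONS),
	NULL,
};
#endif
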
extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

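/*
 * Sketch of how a model setup path populates these tables, indexed via the
 * C() macro above (the event encodings shown are placeholders):
 *
 *	hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = 0x01b7;
 *	hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]  = 0x0001;
 *	hw_cache_event_ids[C(L1I)][C(OP_WRITE)][C(RESULT_ACCESS)] = -1;
 */
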
u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

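/*
 * Example (a sketch): generic code programs counter @idx through these
 * helpers so that model-specific MSR spacing (->addr_offset) and rdpmc
 * index remapping stay out of the common paths:
 *
 *	wrmsrl(x86_pmu_config_addr(idx), config);
 *	rdpmcl(x86_pmu_rdpmc_index(idx), count);
 */
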
int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */