/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)                                                \
do {                                                                    \
        trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),       \
                        (unsigned long)(val));                          \
        native_write_msr((msr), (u32)((u64)(val)),                      \
                        (u32)((u64)(val) >> 32));                       \
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        int type = in_nmi() ? KM_NMI : KM_IRQ0;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page, type);
                memcpy(to, map+offset, size);
                kunmap_atomic(map, type);
                put_page(page);

                len += size;
                to += size;
                addr += size;

        } while (len < n);

        return len;
}

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
};

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES         16

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events;
        int                     n_added;
        int                     n_txn;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */

        unsigned int            group_flag;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        u64                     pebs_enabled;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        void                            *lbr_context;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];

        /*
         * AMD specific bits
         */
        struct amd_nb           *amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
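
/*
 * For example (illustrative values, not taken from an actual constraint
 * table): EVENT_CONSTRAINT(0xc0, 0x3, ARCH_PERFMON_EVENTSEL_EVENT)
 * describes an event code 0xc0 that may only be scheduled on generic
 * counters 0 and 1; HWEIGHT(0x3) == 2 becomes its scheduling weight,
 * which x86_schedule_events() uses to place the most constrained
 * events first.
 */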

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END            \
        EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight; (e)++)

union perf_capabilities {
        struct {
                u64     lbr_format    : 6;
                u64     pebs_trap     : 1;
                u64     pebs_arch_reg : 1;
                u64     pebs_format   : 4;
                u64     smm_freeze    : 1;
        };
        u64     capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                  struct perf_event *event);
        struct event_constraint *event_constraints;
        void            (*quirks)(void);
        int             perfctr_second_write;

        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        int             bts, pebs;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(hwc->event_base + idx, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
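
        /*
         * Worked example of the shift trick above: with cntval_bits == 48
         * (shift == 16), a counter that wrapped from 0xFFFFFFFFFFFE to 0x2
         * gives (0x2 << 16) - (0xFFFFFFFFFFFE << 16) == 0x40000, and the
         * arithmetic shift right by 16 recovers the correct delta of 4.
         */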

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                disable_lapic_nmi_watchdog();

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu.eventsel + i);

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu.perfctr + i);

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu.perfctr + i);
                release_evntsel_nmi(x86_pmu.eventsel + i);
        }

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;

        return 0;
}

static int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via a hrtimer based software event):
                 */
                if (!x86_pmu.apic)
                        return -EOPNOTSUPP;
        }

        if (attr->type == PERF_TYPE_RAW)
                return 0;

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, attr);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts)
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }

        hwc->config |= config;

        return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
        if (event->attr.precise_ip) {
                int precise = 0;

                /* Support for constant skid */
                if (x86_pmu.pebs)
                        precise++;

                /* Support for IP fixup */
                if (x86_pmu.lbr_nr)
                        precise++;

                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
        }

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to
         */
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

        return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else {
                                err = reserve_ds_buffers();
                                if (err)
                                        release_pmc_hardware();
                        }
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu.eventsel + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu.eventsel + idx, val);
        }
}

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct perf_event *event = cpuc->events[idx];
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                val = event->hw.config;
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu.eventsel + idx, val);
        }
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, j, w, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                constraints[i] = c;
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = constraints[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        if (i == n)
                goto done;

        /*
         * begin slow path
         */

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        /*
         * weight = number of possible counters
         *
         * 1    = most constrained, only works on one counter
         * wmax = least constrained, works on any counter
         *
         * assign events to counters starting with most
         * constrained events.
         */
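        /*
         * For example, an event whose constraint mask allows only counter 2
         * (weight 1) is placed before events that may run on any of the
         * generic counters (weight == x86_pmu.num_counters).
         */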
        wmax = x86_pmu.num_counters;

        /*
         * when fixed event counters are present,
         * wmax is incremented by 1 to account
         * for one more choice
         */
        if (x86_pmu.num_counters_fixed)
                wmax++;

        for (w = 1, num = n; num && w <= wmax; w++) {
                /* for each event */
                for (i = 0; num && i < n; i++) {
                        c = constraints[i];
                        hwc = &cpuc->event_list[i]->hw;

                        if (c->weight != w)
                                continue;

                        for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_bit(j, used_mask))
                                        break;
                        }

                        if (j == X86_PMC_IDX_MAX)
                                break;

                        __set_bit(j, used_mask);

                        if (assign)
                                assign[i] = j;
                        num--;
                }
        }
done:
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
        return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if we must also collect the leader's sibling events (group)
 * returns the total number of events or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -ENOSPC;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -ENOSPC;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that event_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->event_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
        } else {
                hwc->config_base = x86_pmu.eventsel;
                hwc->event_base = x86_pmu.perfctr;
        }
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        x86_pmu_stop(event);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        x86_pmu_start(event);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
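
        /*
         * Example: with a 48-bit counter and left == 100000, the value
         * written is 2^48 - 100000, so the counter overflows and raises
         * the overflow interrupt after exactly 100000 further increments.
         */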

        /*
         * Due to an erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base + idx,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}

static void x86_pmu_enable_event(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        if (cpuc->enabled)
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        n0 = cpuc->n_events;
        n = collect_events(cpuc, event, false);
        if (n < 0)
                return n;

        /*
         * If a group event scheduling transaction was started,
         * skip the schedulability test here; it will be performed
         * at commit time (->commit_txn) as a whole.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto out;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

out:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (idx == -1)
                return -EAGAIN;

        x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);

        return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
        int ret = x86_pmu_start(event);
        WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
                rdmsrl(x86_pmu.perfctr + idx, pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!__test_and_clear_bit(idx, cpuc->active_mask))
                return;

        x86_pmu.disable(event);

        /*
         * Drain the remaining delta count out of an event
         * that we are disabling:
         */
        x86_perf_event_update(event);

        cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;

        x86_pmu_stop(event);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);

                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];

                        --cpuc->n_events;
                        break;
                }
        }
        perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int idx, handled = 0;
        u64 val;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                event = cpuc->events[idx];
                hwc = &event->hw;

                val = x86_perf_event_update(event);
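                /*
                 * The counter was programmed with a negative value, so its
                 * top bit (bit cntval_bits - 1) stays set until it
                 * overflows; if that bit is still set, this counter did not
                 * overflow and is not the source of the interrupt.
                 */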
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled++;
                data.period = event->hw.last_period;

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, 1, &data, regs))
                        x86_pmu_stop(event);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
        irq_enter();
        ack_APIC_irq();
        inc_irq_stat(apic_pending_irqs);
        perf_event_do_pending();
        irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
        unsigned int    marked;
        int             handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

Ingo Molnar241771e2008-12-03 10:39:53 +01001210static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001211perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001212 unsigned long cmd, void *__args)
1213{
1214 struct die_args *args = __args;
Robert Richter4177c422010-09-02 15:07:48 -04001215 unsigned int this_nmi;
1216 int handled;
Ingo Molnar241771e2008-12-03 10:39:53 +01001217
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001218 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001219 return NOTIFY_DONE;
1220
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001221 switch (cmd) {
1222 case DIE_NMI:
1223 case DIE_NMI_IPI:
1224 break;
Robert Richter4177c422010-09-02 15:07:48 -04001225 case DIE_NMIUNKNOWN:
1226 this_nmi = percpu_read(irq_stat.__nmi_count);
1227 if (this_nmi != __get_cpu_var(pmu_nmi).marked)
1228 /* let the kernel handle the unknown nmi */
1229 return NOTIFY_DONE;
1230 /*
1231 * This one is a PMU back-to-back nmi. Two events
1232 * trigger 'simultaneously' raising two back-to-back
1233 * NMIs. If the first NMI handles both, the latter
1234 * will be empty and daze the CPU. So, we drop it to
1235 * avoid false-positive 'unknown nmi' messages.
1236 */
1237 return NOTIFY_STOP;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001238 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001239 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001240 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001241
Ingo Molnar241771e2008-12-03 10:39:53 +01001242 apic_write(APIC_LVTPC, APIC_DM_NMI);
Robert Richter4177c422010-09-02 15:07:48 -04001243
1244 handled = x86_pmu.handle_irq(args->regs);
1245 if (!handled)
1246 return NOTIFY_DONE;
1247
1248 this_nmi = percpu_read(irq_stat.__nmi_count);
1249 if ((handled > 1) ||
1250 /* the next nmi could be a back-to-back nmi */
1251 ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
1252 (__get_cpu_var(pmu_nmi).handled > 1))) {
1253 /*
1254 * We could see two back-to-back NMI situations in a row:
1255 * the first NMI handles more than one counter, the second
1256 * handles only one counter and the third handles no counter
1257 * at all.
1258 *
1259 * This is the second NMI, because the previous one handled
1260 * more than one counter. We mark the next (third) NMI and
1261 * then drop it if it turns out to be unhandled.
1262 */
1263 __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
1264 __get_cpu_var(pmu_nmi).handled = handled;
1265 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001266
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001267 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001268}
1269
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001270static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1271 .notifier_call = perf_event_nmi_handler,
1272 .next = NULL,
1273 .priority = 1
1274};
1275
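/*
 * 'unconstrained' (filled in by init_hw_perf_events()) lets an event use any
 * generic counter; 'emptyconstraint' stays all-zero, i.e. it has no usable
 * counter and zero weight, so events that end up with it fail scheduling
 * (see validate_event()).
 */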
Peter Zijlstra63b14642010-01-22 16:32:17 +01001276static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001277static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001278
Peter Zijlstra63b14642010-01-22 16:32:17 +01001279static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001280x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001281{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001282 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001283
Stephane Eranian1da53e02010-01-18 10:58:01 +02001284 if (x86_pmu.event_constraints) {
1285 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001286 if ((event->hw.config & c->cmask) == c->code)
1287 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001288 }
1289 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001290
1291 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001292}
1293
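/*
 * The vendor/model specific PMU implementations are #included as .c files
 * so that they can use the static helpers and data structures above.
 */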
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001294#include "perf_event_amd.c"
1295#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001296#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001297#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001298#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001299#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301300
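/*
 * CPU hotplug callback: the return value of x86_pmu.cpu_prepare() is
 * propagated to the notifier chain, so vendor code can veto CPU_UP_PREPARE;
 * the remaining callbacks are plain per-stage hooks.
 */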
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001301static int __cpuinit
1302x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1303{
1304 unsigned int cpu = (long)hcpu;
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001305 int ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001306
1307 switch (action & ~CPU_TASKS_FROZEN) {
1308 case CPU_UP_PREPARE:
1309 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001310 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001311 break;
1312
1313 case CPU_STARTING:
1314 if (x86_pmu.cpu_starting)
1315 x86_pmu.cpu_starting(cpu);
1316 break;
1317
1318 case CPU_DYING:
1319 if (x86_pmu.cpu_dying)
1320 x86_pmu.cpu_dying(cpu);
1321 break;
1322
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001323 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001324 case CPU_DEAD:
1325 if (x86_pmu.cpu_dead)
1326 x86_pmu.cpu_dead(cpu);
1327 break;
1328
1329 default:
1330 break;
1331 }
1332
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001333 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001334}
1335
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001336static void __init pmu_check_apic(void)
1337{
1338 if (cpu_has_apic)
1339 return;
1340
1341 x86_pmu.apic = 0;
1342 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1343 pr_info("no hardware sampling interrupt available.\n");
1344}
1345
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001346void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301347{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001348 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001349 int err;
1350
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001351 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001352
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301353 switch (boot_cpu_data.x86_vendor) {
1354 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001355 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301356 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301357 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001358 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301359 break;
Robert Richter41389602009-04-29 12:47:00 +02001360 default:
1361 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301362 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001363 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001364 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301365 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001366 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301367
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001368 pmu_check_apic();
1369
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001370 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001371
Peter Zijlstra3c447802010-03-04 21:49:01 +01001372 if (x86_pmu.quirks)
1373 x86_pmu.quirks();
1374
Robert Richter948b1bb2010-03-29 18:36:50 +02001375 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001376 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001377 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1378 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001379 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001380 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1381 perf_max_events = x86_pmu.num_counters;
Ingo Molnar241771e2008-12-03 10:39:53 +01001382
Robert Richter948b1bb2010-03-29 18:36:50 +02001383 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001384 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001385 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1386 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001387 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001388
Robert Richterd6dc0b42010-03-17 12:49:13 +01001389 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001390 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001391
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001392 perf_events_lapic_init();
1393 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001394
Peter Zijlstra63b14642010-01-22 16:32:17 +01001395 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001396 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1397 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001398
Peter Zijlstrab622d642010-02-01 15:36:30 +01001399 if (x86_pmu.event_constraints) {
1400 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001401 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001402 continue;
1403
Robert Richter948b1bb2010-03-29 18:36:50 +02001404 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1405 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001406 }
1407 }
1408
Ingo Molnar57c0c152009-09-21 12:20:38 +02001409 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001410 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1411 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1412 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001413 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001414 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001415 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001416
1417 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001418}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001419
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001420static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001421{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001422 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001423}
1424
Lin Ming4d1c52b2010-04-23 13:56:12 +08001425/*
1426 * Start group events scheduling transaction
1427 * Set the flag so that pmu::enable() does not perform the
1428 * schedulability test; it will be performed at commit time
1429 */
1430static void x86_pmu_start_txn(const struct pmu *pmu)
1431{
1432 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1433
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001434 cpuc->group_flag |= PERF_EVENT_TXN;
Stephane Eranian90151c352010-05-25 16:23:10 +02001435 cpuc->n_txn = 0;
Lin Ming4d1c52b2010-04-23 13:56:12 +08001436}
1437
1438/*
1439 * Stop group events scheduling transaction
1440 * Clear the flag, and pmu::enable() will again perform the
1441 * schedulability test.
1442 */
1443static void x86_pmu_cancel_txn(const struct pmu *pmu)
1444{
1445 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1446
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001447 cpuc->group_flag &= ~PERF_EVENT_TXN;
Stephane Eranian90151c352010-05-25 16:23:10 +02001448 /*
1449 * Truncate the collected events.
1450 */
1451 cpuc->n_added -= cpuc->n_txn;
1452 cpuc->n_events -= cpuc->n_txn;
Lin Ming4d1c52b2010-04-23 13:56:12 +08001453}
1454
1455/*
1456 * Commit group events scheduling transaction
1457 * Perform the group schedulability test as a whole
1458 * Return 0 on success
1459 */
1460static int x86_pmu_commit_txn(const struct pmu *pmu)
1461{
1462 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1463 int assign[X86_PMC_IDX_MAX];
1464 int n, ret;
1465
1466 n = cpuc->n_events;
1467
1468 if (!x86_pmu_initialized())
1469 return -EAGAIN;
1470
1471 ret = x86_pmu.schedule_events(cpuc, n, assign);
1472 if (ret)
1473 return ret;
1474
1475 /*
1476 * copy the new assignment now that we know it is possible;
1477 * it will be used by hw_perf_enable()
1478 */
1479 memcpy(cpuc->assign, assign, n*sizeof(int));
1480
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001481 cpuc->group_flag &= ~PERF_EVENT_TXN;
Stephane Eranian90151c352010-05-25 16:23:10 +02001482
Lin Ming4d1c52b2010-04-23 13:56:12 +08001483 return 0;
1484}
1485
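/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled: this
 * is roughly how the generic perf core is expected to drive the transaction
 * API above when scheduling an event group.  example_group_sched_in() and
 * event_sched_in() are hypothetical names, not functions defined here.
 */
#if 0
static int example_group_sched_in(const struct pmu *pmu,
				  struct perf_event *leader)
{
	struct perf_event *sibling;

	pmu->start_txn(pmu);		/* defer the schedulability test */

	if (event_sched_in(leader))
		goto fail;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (event_sched_in(sibling))
			goto fail;
	}

	if (!pmu->commit_txn(pmu))	/* test the whole group at once */
		return 0;
fail:
	pmu->cancel_txn(pmu);		/* drop the partially collected group */
	return -EAGAIN;
}
#endif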
Robert Richter4aeb0b42009-04-29 12:47:03 +02001486static const struct pmu pmu = {
1487 .enable = x86_pmu_enable,
1488 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001489 .start = x86_pmu_start,
1490 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001491 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001492 .unthrottle = x86_pmu_unthrottle,
Lin Ming4d1c52b2010-04-23 13:56:12 +08001493 .start_txn = x86_pmu_start_txn,
1494 .cancel_txn = x86_pmu_cancel_txn,
1495 .commit_txn = x86_pmu_commit_txn,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001496};
1497
Stephane Eranian1da53e02010-01-18 10:58:01 +02001498/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001499 * validate that we can schedule this event
1500 */
1501static int validate_event(struct perf_event *event)
1502{
1503 struct cpu_hw_events *fake_cpuc;
1504 struct event_constraint *c;
1505 int ret = 0;
1506
1507 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1508 if (!fake_cpuc)
1509 return -ENOMEM;
1510
1511 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1512
1513 if (!c || !c->weight)
1514 ret = -ENOSPC;
1515
1516 if (x86_pmu.put_event_constraints)
1517 x86_pmu.put_event_constraints(fake_cpuc, event);
1518
1519 kfree(fake_cpuc);
1520
1521 return ret;
1522}
1523
1524/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001525 * validate a single event group
1526 *
1527 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001528 * - check events are compatible with each other
1529 * - events do not compete for the same counter
1530 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001531 *
1532 * validation ensures the group can be loaded onto the
1533 * PMU if it was the only group available.
1534 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001535static int validate_group(struct perf_event *event)
1536{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001537 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001538 struct cpu_hw_events *fake_cpuc;
1539 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001540
Peter Zijlstra502568d2010-01-22 14:35:46 +01001541 ret = -ENOMEM;
1542 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1543 if (!fake_cpuc)
1544 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001545
Stephane Eranian1da53e02010-01-18 10:58:01 +02001546 /*
1547 * the event is not yet connected with its
1548 * siblings; therefore we must first collect
1549 * the existing siblings, then add the new event
1550 * before we can simulate the scheduling
1551 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001552 ret = -ENOSPC;
1553 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001554 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001555 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001556
Peter Zijlstra502568d2010-01-22 14:35:46 +01001557 fake_cpuc->n_events = n;
1558 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001559 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001560 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001561
Peter Zijlstra502568d2010-01-22 14:35:46 +01001562 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001563
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001564 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001565
1566out_free:
1567 kfree(fake_cpuc);
1568out:
1569 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001570}
1571
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001572const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001573{
Stephane Eranian81130702010-01-21 17:39:01 +02001574 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001575 int err;
1576
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001577 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001578 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001579 /*
1580 * we temporarily connect event to its pmu
1581 * such that validate_group() can classify
1582 * it as an x86 event using is_x86_event()
1583 */
1584 tmp = event->pmu;
1585 event->pmu = &pmu;
1586
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001587 if (event->group_leader != event)
1588 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001589 else
1590 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001591
1592 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001593 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001594 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001595 if (event->destroy)
1596 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001597 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001598 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001599
Robert Richter4aeb0b42009-04-29 12:47:03 +02001600 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001601}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001602
1603/*
1604 * callchain support
1605 */
1606
1607static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001608void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001609{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001610 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001611 entry->ip[entry->nr++] = ip;
1612}
1613
Tejun Heo245b2e72009-06-24 15:13:48 +09001614static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1615static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001616
1617
1618static void
1619backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1620{
1621 /* Ignore warnings */
1622}
1623
1624static void backtrace_warning(void *data, char *msg)
1625{
1626 /* Ignore warnings */
1627}
1628
1629static int backtrace_stack(void *data, char *name)
1630{
Ingo Molnar038e8362009-06-15 09:57:59 +02001631 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001632}
1633
1634static void backtrace_address(void *data, unsigned long addr, int reliable)
1635{
1636 struct perf_callchain_entry *entry = data;
1637
Frederic Weisbecker6f4dee02010-03-18 23:47:01 +01001638 callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001639}
1640
1641static const struct stacktrace_ops backtrace_ops = {
1642 .warning = backtrace_warning,
1643 .warning_symbol = backtrace_warning_symbol,
1644 .stack = backtrace_stack,
1645 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001646 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001647};
1648
1649static void
1650perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1651{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001652 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001653 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001654
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001655 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001656}
1657
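/*
 * The user-space walks below follow the saved frame-pointer chain: each
 * frame is assumed to start with a pointer to the caller's frame followed
 * by the return address (the struct stack_frame / stack_frame_ia32 layout),
 * copied in with copy_from_user_nmi() since we may be in NMI context.
 */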
Torok Edwin257ef9d2010-03-17 12:07:16 +02001658#ifdef CONFIG_COMPAT
1659static inline int
1660perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001661{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001662 /* 32-bit process in 64-bit kernel. */
1663 struct stack_frame_ia32 frame;
1664 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001665
Torok Edwin257ef9d2010-03-17 12:07:16 +02001666 if (!test_thread_flag(TIF_IA32))
1667 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001668
Torok Edwin257ef9d2010-03-17 12:07:16 +02001669 fp = compat_ptr(regs->bp);
1670 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1671 unsigned long bytes;
1672 frame.next_frame = 0;
1673 frame.return_address = 0;
1674
1675 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1676 if (bytes != sizeof(frame))
1677 break;
1678
1679 if (fp < compat_ptr(regs->sp))
1680 break;
1681
1682 callchain_store(entry, frame.return_address);
1683 fp = compat_ptr(frame.next_frame);
1684 }
1685 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001686}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001687#else
1688static inline int
1689perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1690{
1691 return 0;
1692}
1693#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001694
1695static void
1696perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1697{
1698 struct stack_frame frame;
1699 const void __user *fp;
1700
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001701 if (!user_mode(regs))
1702 regs = task_pt_regs(current);
1703
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001704 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001705
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001706 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001707 callchain_store(entry, regs->ip);
1708
Torok Edwin257ef9d2010-03-17 12:07:16 +02001709 if (perf_callchain_user32(regs, entry))
1710 return;
1711
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001712 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001713 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001714 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001715 frame.return_address = 0;
1716
Torok Edwin257ef9d2010-03-17 12:07:16 +02001717 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1718 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001719 break;
1720
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001721 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001722 break;
1723
1724 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001725 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001726 }
1727}
1728
1729static void
1730perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1731{
1732 int is_user;
1733
1734 if (!regs)
1735 return;
1736
1737 is_user = user_mode(regs);
1738
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001739 if (is_user && current->state != TASK_RUNNING)
1740 return;
1741
1742 if (!is_user)
1743 perf_callchain_kernel(regs, entry);
1744
1745 if (current->mm)
1746 perf_callchain_user(regs, entry);
1747}
1748
1749struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1750{
1751 struct perf_callchain_entry *entry;
1752
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001753 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1754 /* TODO: We don't support guest OS callchains yet */
1755 return NULL;
1756 }
1757
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001758 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001759 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001760 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001761 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001762
1763 entry->nr = 0;
1764
1765 perf_do_callchain(regs, entry);
1766
1767 return entry;
1768}
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01001769
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001770unsigned long perf_instruction_pointer(struct pt_regs *regs)
1771{
1772 unsigned long ip;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001773
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001774 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1775 ip = perf_guest_cbs->get_guest_ip();
1776 else
1777 ip = instruction_pointer(regs);
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001778
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001779 return ip;
1780}
1781
1782unsigned long perf_misc_flags(struct pt_regs *regs)
1783{
1784 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001785
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001786 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001787 if (perf_guest_cbs->is_user_mode())
1788 misc |= PERF_RECORD_MISC_GUEST_USER;
1789 else
1790 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1791 } else {
1792 if (user_mode(regs))
1793 misc |= PERF_RECORD_MISC_USER;
1794 else
1795 misc |= PERF_RECORD_MISC_KERNEL;
1796 }
1797
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001798 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02001799 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001800
1801 return misc;
1802}