/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
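
/*
 * Illustrative example (not compiled): for a hypothetical event code 0x3c
 * that may only run on counters 0 and 1, the constraint would be written
 * and would expand roughly as shown below; HWEIGHT(0x3) yields the weight
 * of 2 that the scheduler uses to order events.
 */
#if 0
static struct event_constraint example_constraint =
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3);
/*
 * ~= { { .idxmsk64 = 0x3 }, .code = 0x3c,
 *      .cmask = ARCH_PERFMON_EVENTSEL_EVENT, .weight = 2 }
 */
#endif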

union perf_capabilities {
	struct {
		u64	lbr_format    : 6;
		u64	pebs_trap     : 1;
		u64	pebs_arch_reg : 1;
		u64	pebs_format   : 4;
		u64	smm_freeze    : 1;
	};
	u64	capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
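
/*
 * Worked example (illustrative, assuming 48-bit counters, so shift = 16):
 * if prev_raw_count = 0x0000ffffffffff00 and the counter has wrapped to
 * new_raw_count = 0x10, then
 *
 *	(0x10 << 16) - (0x0000ffffffffff00 << 16) = 0x0000000001100000
 *
 * and the arithmetic right shift by 16 gives delta = 0x110, the number of
 * events that actually elapsed across the 48-bit wrap, rather than a huge
 * bogus value.
 */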

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
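
/*
 * Illustrative sketch (not compiled): a hypothetical request for L1-D
 * read misses encodes attr->config as type | (op << 8) | (result << 16),
 * which is exactly what set_ext_hw_attr() unpacks above.
 */
#if 0
static u64 example_l1d_read_miss_config(void)
{
	return PERF_COUNT_HW_CACHE_L1D |
	       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}
#endif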

static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
{
	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	return 0;
}

static u64 x86_pmu_raw_event(u64 hw_event)
{
	return hw_event & X86_RAW_EVENT_MASK;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else {
				err = reserve_ds_buffers();
				if (err)
					release_pmc_hardware();
			}
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/* Processor specifics */
	err = x86_pmu.hw_config(attr, hwc);
	if (err)
		return err;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provide the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
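
/*
 * Worked example (illustrative): with three events on a 4-counter PMU,
 * where event A is constrained to counter 0 (weight 1) and events B and C
 * may run on any counter (weight 4), the slow path above places A on
 * counter 0 in the w == 1 pass before B and C are considered in the
 * w == 4 pass, so the least flexible event is never starved by the
 * flexible ones.
 */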

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}
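
/*
 * Illustrative example (assuming X86_PMC_IDX_FIXED is 32, as in this
 * kernel's headers): a fixed counter assigned idx 33 gets
 * event_base = MSR_ARCH_PERFMON_FIXED_CTR0 - 32, so that the
 * "event_base + idx" arithmetic used by the rdmsrl()/wrmsrl() callers
 * lands on MSR_ARCH_PERFMON_FIXED_CTR1.
 */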

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx,
			(u64)(-left) & x86_pmu.cntval_mask);

	perf_event_update_userpage(event);

	return ret;
}
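
/*
 * Worked example (illustrative, assuming 48-bit counters): for a
 * sample_period of 100000, left == 100000 and the counter is programmed
 * with (u64)(-100000) & cntval_mask == 0xfffffffe7960, so it overflows
 * and raises the PMI after exactly 100000 increments.
 */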

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
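
/*
 * Illustrative note on the overflow test above (assuming 48-bit counters):
 * a counter that is still counting towards its next overflow reads back
 * with bit 47 set, because it was programmed with -left near the top of
 * the range, so the handler skips it; once it has wrapped past zero that
 * bit is clear and the event is treated as overflowed.
 */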
Robert Richter39d81ea2009-04-29 12:47:05 +02001134
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001135void smp_perf_pending_interrupt(struct pt_regs *regs)
1136{
1137 irq_enter();
1138 ack_APIC_irq();
1139 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001140 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001141 irq_exit();
1142}
1143
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001144void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001145{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001146#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02001147 if (!x86_pmu.apic || !x86_pmu_initialized())
1148 return;
1149
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001150 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001151#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001152}
1153
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001154void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001155{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001156 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001157 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001158
Ingo Molnar241771e2008-12-03 10:39:53 +01001159 /*
Yong Wangc323d952009-05-29 13:28:35 +08001160 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001161 */
Yong Wangc323d952009-05-29 13:28:35 +08001162 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar241771e2008-12-03 10:39:53 +01001163}
1164
1165static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001166perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001167 unsigned long cmd, void *__args)
1168{
1169 struct die_args *args = __args;
1170 struct pt_regs *regs;
1171
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001172 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001173 return NOTIFY_DONE;
1174
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001175 switch (cmd) {
1176 case DIE_NMI:
1177 case DIE_NMI_IPI:
1178 break;
1179
1180 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001181 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001182 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001183
1184 regs = args->regs;
1185
1186 apic_write(APIC_LVTPC, APIC_DM_NMI);
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001187 /*
1188 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001189 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001190 *
1191 * If the first NMI handles both, the latter will be empty and daze
1192 * the CPU.
1193 */
Yong Wanga3288102009-06-03 13:12:55 +08001194 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001195
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001196 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001197}
1198
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001199static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1200 .notifier_call = perf_event_nmi_handler,
1201 .next = NULL,
1202 .priority = 1
1203};
1204
Peter Zijlstra63b14642010-01-22 16:32:17 +01001205static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001206static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001207
Peter Zijlstra63b14642010-01-22 16:32:17 +01001208static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001209x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001210{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001211 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001212
Stephane Eranian1da53e02010-01-18 10:58:01 +02001213 if (x86_pmu.event_constraints) {
1214 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001215 if ((event->hw.config & c->cmask) == c->code)
1216 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001217 }
1218 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001219
1220 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001221}
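/*
 * Illustrative sketch only (the event code, masks and weight below are
 * made up): a per-model constraint table of the kind the loop above
 * matches against.  An event whose (hw.config & cmask) equals an entry's
 * code is restricted to the counters named in that entry's index mask.
 */
#if 0
static struct event_constraint example_event_constraints[] = {
	/* hypothetical: event 0x48 may only run on generic counters 0 and 1 */
	__EVENT_CONSTRAINT(0x48, 0x3, 0xff, 2),
	EVENT_CONSTRAINT_END	/* zero terminator, as in the real per-model tables */
};
#endif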
1222
Stephane Eranian1da53e02010-01-18 10:58:01 +02001223static int x86_event_sched_in(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001224 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001225{
1226 int ret = 0;
1227
1228 event->state = PERF_EVENT_STATE_ACTIVE;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001229 event->oncpu = smp_processor_id();
Stephane Eranian1da53e02010-01-18 10:58:01 +02001230 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
1231
1232 if (!is_x86_event(event))
1233 ret = event->pmu->enable(event);
1234
1235 if (!ret && !is_software_event(event))
1236 cpuctx->active_oncpu++;
1237
1238 if (!ret && event->attr.exclusive)
1239 cpuctx->exclusive = 1;
1240
1241 return ret;
1242}
1243
1244static void x86_event_sched_out(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001245 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001246{
1247 event->state = PERF_EVENT_STATE_INACTIVE;
1248 event->oncpu = -1;
1249
1250 if (!is_x86_event(event))
1251 event->pmu->disable(event);
1252
1253 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
1254
1255 if (!is_software_event(event))
1256 cpuctx->active_oncpu--;
1257
1258 if (event->attr.exclusive || !cpuctx->active_oncpu)
1259 cpuctx->exclusive = 0;
1260}
1261
1262/*
1263 * Called to enable a whole group of events.
1264 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1265 * Assumes the caller has disabled interrupts and has
1266 * frozen the PMU with hw_perf_disable().
1267 *
1268 * If this returns 1 (success), the caller is then guaranteed
1269 * to call perf_enable() and hw_perf_enable().
1270 */
1271int hw_perf_group_sched_in(struct perf_event *leader,
1272 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001273 struct perf_event_context *ctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001274{
Peter Zijlstra6e377382010-02-11 13:21:58 +01001275 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001276 struct perf_event *sub;
1277 int assign[X86_PMC_IDX_MAX];
1278 int n0, n1, ret;
1279
Cyrill Gorcunov0b861222010-03-12 00:50:16 +03001280 if (!x86_pmu_initialized())
1281 return 0;
1282
Stephane Eranian1da53e02010-01-18 10:58:01 +02001283 /* n0 = total number of events */
1284 n0 = collect_events(cpuc, leader, true);
1285 if (n0 < 0)
1286 return n0;
1287
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001288 ret = x86_pmu.schedule_events(cpuc, n0, assign);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001289 if (ret)
1290 return ret;
1291
Peter Zijlstra6e377382010-02-11 13:21:58 +01001292 ret = x86_event_sched_in(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001293 if (ret)
1294 return ret;
1295
1296 n1 = 1;
1297 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02001298 if (sub->state > PERF_EVENT_STATE_OFF) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001299 ret = x86_event_sched_in(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001300 if (ret)
1301 goto undo;
1302 ++n1;
1303 }
1304 }
1305 /*
1306 * Copy the new assignment now that we know it is possible;
1307 * it will be used by hw_perf_enable().
1308 */
1309 memcpy(cpuc->assign, assign, n0*sizeof(int));
1310
1311 cpuc->n_events = n0;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001312 cpuc->n_added += n1;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001313 ctx->nr_active += n1;
1314
1315 /*
1316 * A return value of 1 means success and that the events are active.
1317 * This is not quite true because we defer actual activation until
1318 * hw_perf_enable(), but this way we ensure the caller won't try to
1319 * enable the individual events.
1321 */
1322 return 1;
1323undo:
Peter Zijlstra6e377382010-02-11 13:21:58 +01001324 x86_event_sched_out(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001325 n0 = 1;
1326 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1327 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001328 x86_event_sched_out(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001329 if (++n0 == n1)
1330 break;
1331 }
1332 }
1333 return ret;
1334}
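/*
 * Summary of the flow above: collect_events() gathers the leader and its
 * siblings, x86_pmu.schedule_events() dry-runs the counter assignment,
 * x86_event_sched_in() marks each member active, and only then is the
 * assignment copied into cpuc->assign for hw_perf_enable() to commit.
 * Any failure unwinds the already-activated members through
 * x86_event_sched_out().
 */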
1335
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001336#include "perf_event_amd.c"
1337#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001338#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001339#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001340#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001341#include "perf_event_intel.c"
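/*
 * The model-specific PMU drivers are #included as .c files so that they
 * end up in the same translation unit and can use the static helpers and
 * data structures defined above.
 */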
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301342
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001343static int __cpuinit
1344x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1345{
1346 unsigned int cpu = (long)hcpu;
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001347 int ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001348
1349 switch (action & ~CPU_TASKS_FROZEN) {
1350 case CPU_UP_PREPARE:
1351 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001352 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001353 break;
1354
1355 case CPU_STARTING:
1356 if (x86_pmu.cpu_starting)
1357 x86_pmu.cpu_starting(cpu);
1358 break;
1359
1360 case CPU_DYING:
1361 if (x86_pmu.cpu_dying)
1362 x86_pmu.cpu_dying(cpu);
1363 break;
1364
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001365 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001366 case CPU_DEAD:
1367 if (x86_pmu.cpu_dead)
1368 x86_pmu.cpu_dead(cpu);
1369 break;
1370
1371 default:
1372 break;
1373 }
1374
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001375 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001376}
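/*
 * The hotplug notifier above just forwards each CPU state transition to
 * the optional per-model callbacks: cpu_prepare() may allocate per-cpu
 * state (and can veto the bring-up through its return value),
 * cpu_starting()/cpu_dying() run on the affected CPU itself, and
 * cpu_dead() (also invoked for CPU_UP_CANCELED) releases whatever
 * cpu_prepare() set up.
 */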
1377
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001378static void __init pmu_check_apic(void)
1379{
1380 if (cpu_has_apic)
1381 return;
1382
1383 x86_pmu.apic = 0;
1384 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1385 pr_info("no hardware sampling interrupt available.\n");
1386}
1387
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001388void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301389{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001390 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001391 int err;
1392
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001393 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001394
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301395 switch (boot_cpu_data.x86_vendor) {
1396 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001397 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301398 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301399 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001400 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301401 break;
Robert Richter41389602009-04-29 12:47:00 +02001402 default:
1403 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301404 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001405 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001406 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301407 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001408 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301409
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001410 pmu_check_apic();
1411
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001412 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001413
Peter Zijlstra3c447802010-03-04 21:49:01 +01001414 if (x86_pmu.quirks)
1415 x86_pmu.quirks();
1416
Robert Richter948b1bb2010-03-29 18:36:50 +02001417 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001418 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001419 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1420 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001421 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001422 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1423 perf_max_events = x86_pmu.num_counters;
Ingo Molnar241771e2008-12-03 10:39:53 +01001424
Robert Richter948b1bb2010-03-29 18:36:50 +02001425 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001426 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001427 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1428 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001429 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001430
Robert Richterd6dc0b42010-03-17 12:49:13 +01001431 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001432 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001433
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001434 perf_events_lapic_init();
1435 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001436
Peter Zijlstra63b14642010-01-22 16:32:17 +01001437 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001438 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1439 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001440
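	/*
	 * Constraints whose cmask covers the full raw event
	 * (X86_RAW_EVENT_MASK) are, in practice, the fixed-counter
	 * constraints; widen their index mask and weight so the same
	 * event can also be scheduled on any generic counter.
	 */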
Peter Zijlstrab622d642010-02-01 15:36:30 +01001441 if (x86_pmu.event_constraints) {
1442 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001443 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001444 continue;
1445
Robert Richter948b1bb2010-03-29 18:36:50 +02001446 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1447 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001448 }
1449 }
1450
Ingo Molnar57c0c152009-09-21 12:20:38 +02001451 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001452 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1453 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1454 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001455 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001456 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001457 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001458
1459 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001460}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001461
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001462static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001463{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001464 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001465}
1466
Robert Richter4aeb0b42009-04-29 12:47:03 +02001467static const struct pmu pmu = {
1468 .enable = x86_pmu_enable,
1469 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001470 .start = x86_pmu_start,
1471 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001472 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001473 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001474};
1475
Stephane Eranian1da53e02010-01-18 10:58:01 +02001476/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001477 * validate that we can schedule this event
1478 */
1479static int validate_event(struct perf_event *event)
1480{
1481 struct cpu_hw_events *fake_cpuc;
1482 struct event_constraint *c;
1483 int ret = 0;
1484
1485 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1486 if (!fake_cpuc)
1487 return -ENOMEM;
1488
1489 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1490
1491 if (!c || !c->weight)
1492 ret = -ENOSPC;
1493
1494 if (x86_pmu.put_event_constraints)
1495 x86_pmu.put_event_constraints(fake_cpuc, event);
1496
1497 kfree(fake_cpuc);
1498
1499 return ret;
1500}
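/*
 * Note that validate_event() only checks that the event has at least one
 * counter it could be scheduled on; conflicts between events are only
 * simulated for groups, by validate_group() below.
 */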
1501
1502/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001503 * validate a single event group
1504 *
1505 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001506 * - check events are compatible with each other
1507 * - events do not compete for the same counter
1508 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001509 *
1510 * validation ensures the group can be loaded onto the
1511 * PMU if it was the only group available.
1512 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001513static int validate_group(struct perf_event *event)
1514{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001515 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001516 struct cpu_hw_events *fake_cpuc;
1517 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001518
Peter Zijlstra502568d2010-01-22 14:35:46 +01001519 ret = -ENOMEM;
1520 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1521 if (!fake_cpuc)
1522 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001523
Stephane Eranian1da53e02010-01-18 10:58:01 +02001524 /*
1525 * The event is not yet connected to its siblings;
1526 * therefore we must first collect the existing siblings,
1527 * then add the new event before we can simulate
1528 * the scheduling.
1529 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001530 ret = -ENOSPC;
1531 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001532 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001533 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001534
Peter Zijlstra502568d2010-01-22 14:35:46 +01001535 fake_cpuc->n_events = n;
1536 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001537 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001538 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001539
Peter Zijlstra502568d2010-01-22 14:35:46 +01001540 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001541
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001542 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001543
1544out_free:
1545 kfree(fake_cpuc);
1546out:
1547 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001548}
1549
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001550const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001551{
Stephane Eranian81130702010-01-21 17:39:01 +02001552 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001553 int err;
1554
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001555 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001556 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001557 /*
1558 * we temporarily connect event to its pmu
1559 * such that validate_group() can classify
1560 * it as an x86 event using is_x86_event()
1561 */
1562 tmp = event->pmu;
1563 event->pmu = &pmu;
1564
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001565 if (event->group_leader != event)
1566 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001567 else
1568 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001569
1570 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001571 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001572 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001573 if (event->destroy)
1574 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001575 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001576 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001577
Robert Richter4aeb0b42009-04-29 12:47:03 +02001578 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001579}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001580
1581/*
1582 * callchain support
1583 */
1584
1585static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001586void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001587{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001588 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001589 entry->ip[entry->nr++] = ip;
1590}
1591
Tejun Heo245b2e72009-06-24 15:13:48 +09001592static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1593static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001594
1595
1596static void
1597backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1598{
1599 /* Ignore warnings */
1600}
1601
1602static void backtrace_warning(void *data, char *msg)
1603{
1604 /* Ignore warnings */
1605}
1606
1607static int backtrace_stack(void *data, char *name)
1608{
Ingo Molnar038e8362009-06-15 09:57:59 +02001609 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001610}
1611
1612static void backtrace_address(void *data, unsigned long addr, int reliable)
1613{
1614 struct perf_callchain_entry *entry = data;
1615
1616 if (reliable)
1617 callchain_store(entry, addr);
1618}
1619
1620static const struct stacktrace_ops backtrace_ops = {
1621 .warning = backtrace_warning,
1622 .warning_symbol = backtrace_warning_symbol,
1623 .stack = backtrace_stack,
1624 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001625 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001626};
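/*
 * The stacktrace_ops above turn dump_trace() into a callchain filler:
 * warnings and stack-boundary notifications are ignored, and only
 * addresses the frame-pointer walker (print_context_stack_bp) marks as
 * reliable are recorded into the entry.
 */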
1627
Ingo Molnar038e8362009-06-15 09:57:59 +02001628#include "../dumpstack.h"
1629
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001630static void
1631perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1632{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001633 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001634 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001635
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001636 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001637}
1638
Torok Edwin257ef9d2010-03-17 12:07:16 +02001639#ifdef CONFIG_COMPAT
1640static inline int
1641perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001642{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001643 /* 32-bit process in 64-bit kernel. */
1644 struct stack_frame_ia32 frame;
1645 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001646
Torok Edwin257ef9d2010-03-17 12:07:16 +02001647 if (!test_thread_flag(TIF_IA32))
1648 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001649
Torok Edwin257ef9d2010-03-17 12:07:16 +02001650 fp = compat_ptr(regs->bp);
1651 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1652 unsigned long bytes;
1653 frame.next_frame = 0;
1654 frame.return_address = 0;
1655
1656 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1657 if (bytes != sizeof(frame))
1658 break;
1659
1660 if (fp < compat_ptr(regs->sp))
1661 break;
1662
1663 callchain_store(entry, frame.return_address);
1664 fp = compat_ptr(frame.next_frame);
1665 }
1666 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001667}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001668#else
1669static inline int
1670perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1671{
1672 return 0;
1673}
1674#endif
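/*
 * perf_callchain_user32() walks a compat (ia32) user stack one saved frame
 * at a time: each iteration copies a (next_frame, return_address) pair via
 * copy_from_user_nmi(), stops on a short copy or when the frame pointer
 * drops below the current stack pointer, and records the return address.
 * Returning 1 tells perf_callchain_user() below that the task was handled,
 * so the 64-bit walk is skipped.
 */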
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001675
1676static void
1677perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1678{
1679 struct stack_frame frame;
1680 const void __user *fp;
1681
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001682 if (!user_mode(regs))
1683 regs = task_pt_regs(current);
1684
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001685 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001686
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001687 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001688 callchain_store(entry, regs->ip);
1689
Torok Edwin257ef9d2010-03-17 12:07:16 +02001690 if (perf_callchain_user32(regs, entry))
1691 return;
1692
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001693 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001694 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001695 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001696 frame.return_address = 0;
1697
Torok Edwin257ef9d2010-03-17 12:07:16 +02001698 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1699 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001700 break;
1701
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001702 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001703 break;
1704
1705 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001706 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001707 }
1708}
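/*
 * Both user-space walkers rely on the frame-pointer chain, so user
 * binaries built without frame pointers (-fomit-frame-pointer) will
 * typically produce truncated callchains.
 */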
1709
1710static void
1711perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1712{
1713 int is_user;
1714
1715 if (!regs)
1716 return;
1717
1718 is_user = user_mode(regs);
1719
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001720 if (is_user && current->state != TASK_RUNNING)
1721 return;
1722
1723 if (!is_user)
1724 perf_callchain_kernel(regs, entry);
1725
1726 if (current->mm)
1727 perf_callchain_user(regs, entry);
1728}
1729
1730struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1731{
1732 struct perf_callchain_entry *entry;
1733
1734 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001735 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001736 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001737 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001738
1739 entry->nr = 0;
1740
1741 perf_do_callchain(regs, entry);
1742
1743 return entry;
1744}
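/*
 * Separate per-cpu entries are used for NMI and IRQ context so that an
 * NMI arriving while an interrupt-context callchain is being filled does
 * not overwrite the entry still in use.
 */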
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01001745
1746void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
1747{
1748 regs->ip = ip;
1749 /*
1750 * perf_arch_fetch_caller_regs() adds another call frame, so we need
1751 * to increment the skip level.
1752 */
1753 regs->bp = rewind_frame_pointer(skip + 1);
1754 regs->cs = __KERNEL_CS;
1755 local_save_flags(regs->flags);
1756}