/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

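/*
 * Illustrative sketch (not part of the build): one way a caller running in
 * IRQ/NMI context could use copy_from_user_nmi() to snapshot a small
 * user-space structure, e.g. a saved frame while walking a user callchain.
 * The structure and helper below are hypothetical examples, not the actual
 * callchain code.
 */
#if 0
struct example_user_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int example_copy_user_frame(const void __user *fp,
				   struct example_user_frame *frame)
{
	unsigned long bytes;

	/* copy_from_user_nmi() may copy less than requested; treat that as a miss */
	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}
#endif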
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)

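/*
 * Illustrative sketch (not part of the build): what a model specific
 * constraint table built from the macros above could look like. The event
 * codes and counter masks are example values only; the real tables live in
 * the Intel/AMD specific code.
 */
#if 0
static struct event_constraint example_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* bound to fixed counter 1 */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* event 0x51 only on PMC0-1 */
	EVENT_CONSTRAINT_END
};
#endif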
union perf_capabilities {
	struct {
		u64	lbr_format    : 6;
		u64	pebs_trap     : 1;
		u64	pebs_arch_reg : 1;
		u64	pebs_format   : 4;
		u64	smm_freeze    : 1;
	};
	u64	capabilities;
};

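/*
 * Illustrative sketch (not part of the build): this union is assumed to
 * mirror the IA32_PERF_CAPABILITIES MSR layout, so model specific setup
 * code can read the MSR straight into ->capabilities and then test the
 * individual bit-fields. The helper below is an example only.
 */
#if 0
static void example_read_perf_capabilities(union perf_capabilities *cap)
{
	u64 capabilities;

	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
	cap->capabilities = capabilities;

	if (cap->pebs_trap)
		pr_info("PEBS: trap-style reporting\n");
}
#endif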
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

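/*
 * Worked example for the shift trick above (illustrative numbers only):
 * with 48-bit counters, shift = 64 - 48 = 16. Suppose the counter wrapped
 * from prev_raw_count = 0xffffffffffff to new_raw_count = 0x5. Then, in
 * 64-bit wrap-around arithmetic:
 *
 *	delta = (0x5 << 16) - (0xffffffffffff << 16)
 *	      = 0x0000000000050000 - 0xffffffffffff0000
 *	      = 0x0000000000060000
 *	delta >>= 16	-> 6
 *
 * i.e. six events elapsed across the wrap, without having to special-case
 * hardware that does not sign-extend the count to 64 bits.
 */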
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

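/*
 * Illustrative example of the encoding decoded above (standard perf ABI
 * enum names, example combination): user-space asking for L1D read misses
 * would pass roughly
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which set_ext_hw_attr() translates via hw_cache_event_ids[][][] into the
 * model specific raw event bits.
 */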
static int x86_pmu_hw_config(struct perf_event *event)
{
	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else {
				err = reserve_ds_buffers();
				if (err)
					release_pmc_hardware();
			}
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/* Processor specifics */
	err = x86_pmu.hw_config(event);
	if (err)
		return err;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

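/*
 * Illustrative walk-through of the slow path above (made-up constraints):
 * suppose three events are collected - A may only run on counter 0
 * (weight 1), B may run on counters 0-1 (weight 2) and C may run on any of
 * four generic counters (weight 4). The weight loop places A on counter 0
 * first, then B on counter 1, and finally C on the first still-free
 * counter, counter 2. Scheduling the least flexible events first is what
 * keeps a flexible event from occupying the only counter a constrained
 * event could use.
 */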
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx,
			(u64)(-left) & x86_pmu.cntval_mask);

	perf_event_update_userpage(event);

	return ret;
}

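/*
 * Worked example for the (u64)-left write above (illustrative numbers
 * only): with left = 100000 on a 48-bit counter, the counter is programmed
 * to
 *
 *	(u64)(-100000) & x86_pmu.cntval_mask == 0xfffffffe7960
 *
 * so it overflows, and raises the PMI, after exactly 100000 increments.
 * prev_count is set to the same value, which is what lets
 * x86_perf_event_update() later account those increments as a delta.
 */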
static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

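/*
 * Note on the overflow test in the handler above (illustrative numbers):
 * a counter that has not overflowed yet still holds the large value
 * programmed by x86_perf_event_set_period(), e.g. 0xfffffffe7960 on
 * 48-bit hardware, so bit (cntval_bits - 1) is still set and the event is
 * skipped; a counter that did overflow has wrapped past zero (e.g. 0x3),
 * the bit is clear and the overflow is handled.
 */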
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static int x86_event_sched_in(struct perf_event *event,
			  struct perf_cpu_context *cpuctx)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}

static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

1251/*
1252 * Called to enable a whole group of events.
1253 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1254 * Assumes the caller has disabled interrupts and has
1255 * frozen the PMU with hw_perf_save_disable.
1256 *
1257	 * Called with the PMU disabled. If successful (return value 1), the
1258	 * caller is then guaranteed to call perf_enable() and hw_perf_enable().
1259 */
1260int hw_perf_group_sched_in(struct perf_event *leader,
1261 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001262 struct perf_event_context *ctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001263{
Peter Zijlstra6e377382010-02-11 13:21:58 +01001264 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001265 struct perf_event *sub;
1266 int assign[X86_PMC_IDX_MAX];
1267 int n0, n1, ret;
1268
Cyrill Gorcunov0b861222010-03-12 00:50:16 +03001269 if (!x86_pmu_initialized())
1270 return 0;
1271
Stephane Eranian1da53e02010-01-18 10:58:01 +02001272 /* n0 = total number of events */
1273 n0 = collect_events(cpuc, leader, true);
1274 if (n0 < 0)
1275 return n0;
1276
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001277 ret = x86_pmu.schedule_events(cpuc, n0, assign);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001278 if (ret)
1279 return ret;
1280
Peter Zijlstra6e377382010-02-11 13:21:58 +01001281 ret = x86_event_sched_in(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001282 if (ret)
1283 return ret;
1284
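	/* the group leader itself was just scheduled in above; count it */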
1285 n1 = 1;
1286 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02001287 if (sub->state > PERF_EVENT_STATE_OFF) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001288 ret = x86_event_sched_in(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001289 if (ret)
1290 goto undo;
1291 ++n1;
1292 }
1293 }
1294 /*
1295	 * copy the new assignment now that we know it is possible;
1296	 * it will be used by hw_perf_enable()
1297 */
1298 memcpy(cpuc->assign, assign, n0*sizeof(int));
1299
1300 cpuc->n_events = n0;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001301 cpuc->n_added += n1;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001302 ctx->nr_active += n1;
1303
1304 /*
1305	 * A return value of 1 means success and that the events are active.
1306	 * This is not quite true, because we defer
1307	 * actual activation until hw_perf_enable(), but
1308	 * this way we ensure the caller won't try to enable
1309	 * individual events.
1310 */
1311 return 1;
1312undo:
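	/* unwind the leader and every sibling we already scheduled in */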
Peter Zijlstra6e377382010-02-11 13:21:58 +01001313 x86_event_sched_out(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001314 n0 = 1;
1315 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1316 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001317 x86_event_sched_out(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001318 if (++n0 == n1)
1319 break;
1320 }
1321 }
1322 return ret;
1323}
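/*
 * Sketch of the calling convention described in the comment above;
 * simplified, hypothetical and not taken from the generic perf code.
 * It assumes the core perf_disable()/perf_enable() pair brackets the
 * call; a return value of 1 tells the core the whole group has been
 * (logically) scheduled, so it must not enable the events one by one.
 */
#if 0
static void example_group_sched_in(struct perf_event *leader,
				   struct perf_cpu_context *cpuctx,
				   struct perf_event_context *ctx)
{
	perf_disable();
	if (hw_perf_group_sched_in(leader, cpuctx, ctx) <= 0) {
		/* 0 or an error: fall back to per-event scheduling */
	}
	perf_enable();		/* eventually ends up in hw_perf_enable() */
}
#endif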
1324
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001325#include "perf_event_amd.c"
1326#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001327#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001328#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001329#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001330#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301331
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001332static int __cpuinit
1333x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1334{
1335 unsigned int cpu = (long)hcpu;
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001336 int ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001337
1338 switch (action & ~CPU_TASKS_FROZEN) {
1339 case CPU_UP_PREPARE:
1340 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001341 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001342 break;
1343
1344 case CPU_STARTING:
1345 if (x86_pmu.cpu_starting)
1346 x86_pmu.cpu_starting(cpu);
1347 break;
1348
1349 case CPU_DYING:
1350 if (x86_pmu.cpu_dying)
1351 x86_pmu.cpu_dying(cpu);
1352 break;
1353
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001354 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001355 case CPU_DEAD:
1356 if (x86_pmu.cpu_dead)
1357 x86_pmu.cpu_dead(cpu);
1358 break;
1359
1360 default:
1361 break;
1362 }
1363
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001364 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001365}
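/*
 * Illustrative only: how a model specific driver could hook the CPU
 * hotplug callbacks consumed by the notifier above.  All names here
 * are hypothetical; the real callbacks are set up in the
 * perf_event_*.c files included below.
 */
#if 0
static int example_cpu_prepare(int cpu)
{
	/* allocate per-cpu state before the CPU is brought up */
	return NOTIFY_OK;
}

static void example_cpu_dead(int cpu)
{
	/* free that state once the CPU is gone */
}

static struct x86_pmu example_pmu = {
	.cpu_prepare	= example_cpu_prepare,
	.cpu_dead	= example_cpu_dead,
	/* .cpu_starting and .cpu_dying are optional as well */
};
#endif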
1366
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001367static void __init pmu_check_apic(void)
1368{
1369 if (cpu_has_apic)
1370 return;
1371
1372 x86_pmu.apic = 0;
1373 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1374 pr_info("no hardware sampling interrupt available.\n");
1375}
1376
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001377void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301378{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001379 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001380 int err;
1381
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001382 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001383
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301384 switch (boot_cpu_data.x86_vendor) {
1385 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001386 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301387 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301388 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001389 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301390 break;
Robert Richter41389602009-04-29 12:47:00 +02001391 default:
1392 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301393 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001394 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001395 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301396 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001397 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301398
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001399 pmu_check_apic();
1400
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001401 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001402
Peter Zijlstra3c447802010-03-04 21:49:01 +01001403 if (x86_pmu.quirks)
1404 x86_pmu.quirks();
1405
Robert Richter948b1bb2010-03-29 18:36:50 +02001406 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001407 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001408 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1409 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001410 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001411 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1412 perf_max_events = x86_pmu.num_counters;
Ingo Molnar241771e2008-12-03 10:39:53 +01001413
Robert Richter948b1bb2010-03-29 18:36:50 +02001414 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001415 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001416 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1417 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001418 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001419
Robert Richterd6dc0b42010-03-17 12:49:13 +01001420 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001421 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001422
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001423 perf_events_lapic_init();
1424 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001425
Peter Zijlstra63b14642010-01-22 16:32:17 +01001426 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001427 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1428 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001429
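	/*
	 * Constraints that match on the full raw event (typically the
	 * fixed counter constraints) are widened here so the events
	 * they describe may also run on any generic counter.
	 */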
Peter Zijlstrab622d642010-02-01 15:36:30 +01001430 if (x86_pmu.event_constraints) {
1431 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001432 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001433 continue;
1434
Robert Richter948b1bb2010-03-29 18:36:50 +02001435 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1436 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001437 }
1438 }
1439
Ingo Molnar57c0c152009-09-21 12:20:38 +02001440 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001441 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1442 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1443 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001444 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001445 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001446 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001447
1448 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001449}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001450
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001451static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001452{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001453 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001454}
1455
Robert Richter4aeb0b42009-04-29 12:47:03 +02001456static const struct pmu pmu = {
1457 .enable = x86_pmu_enable,
1458 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001459 .start = x86_pmu_start,
1460 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001461 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001462 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001463};
1464
Stephane Eranian1da53e02010-01-18 10:58:01 +02001465/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001466 * validate that we can schedule this event
1467 */
1468static int validate_event(struct perf_event *event)
1469{
1470 struct cpu_hw_events *fake_cpuc;
1471 struct event_constraint *c;
1472 int ret = 0;
1473
1474 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1475 if (!fake_cpuc)
1476 return -ENOMEM;
1477
1478 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1479
1480 if (!c || !c->weight)
1481 ret = -ENOSPC;
1482
1483 if (x86_pmu.put_event_constraints)
1484 x86_pmu.put_event_constraints(fake_cpuc, event);
1485
1486 kfree(fake_cpuc);
1487
1488 return ret;
1489}
1490
1491/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001492 * validate a single event group
1493 *
1494	 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001495	 * - check events are compatible with each other
1496 * - events do not compete for the same counter
1497 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001498 *
1499 * validation ensures the group can be loaded onto the
1500 * PMU if it was the only group available.
1501 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001502static int validate_group(struct perf_event *event)
1503{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001504 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001505 struct cpu_hw_events *fake_cpuc;
1506 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001507
Peter Zijlstra502568d2010-01-22 14:35:46 +01001508 ret = -ENOMEM;
1509 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1510 if (!fake_cpuc)
1511 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001512
Stephane Eranian1da53e02010-01-18 10:58:01 +02001513 /*
1514 * the event is not yet connected with its
1515	 * siblings; therefore we must first collect
1516 * existing siblings, then add the new event
1517 * before we can simulate the scheduling
1518 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001519 ret = -ENOSPC;
1520 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001521 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001522 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001523
Peter Zijlstra502568d2010-01-22 14:35:46 +01001524 fake_cpuc->n_events = n;
1525 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001526 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001527 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001528
Peter Zijlstra502568d2010-01-22 14:35:46 +01001529 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001530
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001531 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001532
1533out_free:
1534 kfree(fake_cpuc);
1535out:
1536 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001537}
1538
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001539const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001540{
Stephane Eranian81130702010-01-21 17:39:01 +02001541 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001542 int err;
1543
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001544 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001545 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001546 /*
1547 * we temporarily connect event to its pmu
1548 * such that validate_group() can classify
1549 * it as an x86 event using is_x86_event()
1550 */
1551 tmp = event->pmu;
1552 event->pmu = &pmu;
1553
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001554 if (event->group_leader != event)
1555 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001556 else
1557 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001558
1559 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001560 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001561 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001562 if (event->destroy)
1563 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001564 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001565 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001566
Robert Richter4aeb0b42009-04-29 12:47:03 +02001567 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001568}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001569
1570/*
1571 * callchain support
1572 */
1573
1574static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001575void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001576{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001577 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001578 entry->ip[entry->nr++] = ip;
1579}
1580
Tejun Heo245b2e72009-06-24 15:13:48 +09001581static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1582static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001583
1584
1585static void
1586backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1587{
1588 /* Ignore warnings */
1589}
1590
1591static void backtrace_warning(void *data, char *msg)
1592{
1593 /* Ignore warnings */
1594}
1595
1596static int backtrace_stack(void *data, char *name)
1597{
Ingo Molnar038e8362009-06-15 09:57:59 +02001598 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001599}
1600
1601static void backtrace_address(void *data, unsigned long addr, int reliable)
1602{
1603 struct perf_callchain_entry *entry = data;
1604
Frederic Weisbecker6f4dee02010-03-18 23:47:01 +01001605 callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001606}
1607
1608static const struct stacktrace_ops backtrace_ops = {
1609 .warning = backtrace_warning,
1610 .warning_symbol = backtrace_warning_symbol,
1611 .stack = backtrace_stack,
1612 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001613 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001614};
1615
Ingo Molnar038e8362009-06-15 09:57:59 +02001616#include "../dumpstack.h"
1617
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001618static void
1619perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1620{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001621 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001622 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001623
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001624 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001625}
1626
Torok Edwin257ef9d2010-03-17 12:07:16 +02001627#ifdef CONFIG_COMPAT
1628static inline int
1629perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001630{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001631 /* 32-bit process in 64-bit kernel. */
1632 struct stack_frame_ia32 frame;
1633 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001634
Torok Edwin257ef9d2010-03-17 12:07:16 +02001635 if (!test_thread_flag(TIF_IA32))
1636 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001637
Torok Edwin257ef9d2010-03-17 12:07:16 +02001638 fp = compat_ptr(regs->bp);
1639 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1640 unsigned long bytes;
1641 frame.next_frame = 0;
1642 frame.return_address = 0;
1643
1644 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1645 if (bytes != sizeof(frame))
1646 break;
1647
1648 if (fp < compat_ptr(regs->sp))
1649 break;
1650
1651 callchain_store(entry, frame.return_address);
1652 fp = compat_ptr(frame.next_frame);
1653 }
1654 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001655}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001656#else
1657static inline int
1658perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1659{
1660 return 0;
1661}
1662#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001663
1664static void
1665perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1666{
1667 struct stack_frame frame;
1668 const void __user *fp;
1669
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001670 if (!user_mode(regs))
1671 regs = task_pt_regs(current);
1672
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001673 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001674
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001675 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001676 callchain_store(entry, regs->ip);
1677
Torok Edwin257ef9d2010-03-17 12:07:16 +02001678 if (perf_callchain_user32(regs, entry))
1679 return;
1680
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001681 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001682 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001683 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001684 frame.return_address = 0;
1685
Torok Edwin257ef9d2010-03-17 12:07:16 +02001686 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1687 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001688 break;
1689
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001690 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001691 break;
1692
1693 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001694 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001695 }
1696}
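/*
 * For reference only, a sketch rather than the authoritative header
 * definition: the user stack walks above assume each frame starts
 * with a pointer to the next frame followed by the return address,
 * i.e. a layout roughly like this.
 */
#if 0
struct example_stack_frame {
	struct example_stack_frame __user	*next_frame;
	unsigned long				return_address;
};
#endif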
1697
1698static void
1699perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1700{
1701 int is_user;
1702
1703 if (!regs)
1704 return;
1705
1706 is_user = user_mode(regs);
1707
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001708 if (is_user && current->state != TASK_RUNNING)
1709 return;
1710
1711 if (!is_user)
1712 perf_callchain_kernel(regs, entry);
1713
1714 if (current->mm)
1715 perf_callchain_user(regs, entry);
1716}
1717
1718struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1719{
1720 struct perf_callchain_entry *entry;
1721
1722 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001723 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001724 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001725 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001726
1727 entry->nr = 0;
1728
1729 perf_do_callchain(regs, entry);
1730
1731 return entry;
1732}
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01001733
1734void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
1735{
1736 regs->ip = ip;
1737 /*
1738	 * perf_arch_fetch_caller_regs adds another call frame, so we need to
1739	 * increment the skip level
1740 */
1741 regs->bp = rewind_frame_pointer(skip + 1);
1742 regs->cs = __KERNEL_CS;
1743 local_save_flags(regs->flags);
1744}