/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
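
/*
 * Usage sketch (illustrative, not a call site from this file): NMI-time
 * stack walkers use this instead of copy_from_user(), which may fault
 * and is therefore unsafe in NMI context:
 *
 *	if (copy_from_user_nmi(&frame, fp, sizeof(frame)) != sizeof(frame))
 *		break;
 *
 * A short return value (< n) means a user page was not present and the
 * copy is partial.
 */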

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
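
/*
 * Illustrative only: a model-specific constraint table built with the
 * macros above maps event codes to the bitmask of counters they may
 * occupy, terminated by EVENT_CONSTRAINT_END. A hypothetical table:
 *
 *	static struct event_constraint sketch_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x10, 0x1),	(counter 0 only)
 *		INTEL_EVENT_CONSTRAINT(0x11, 0x3),	(counters 0-1)
 *		EVENT_CONSTRAINT_END
 *	};
 *
 * HWEIGHT(n) precomputes the 'weight' (how many counters qualify),
 * which orders the greedy assignment in x86_schedule_events().
 */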

union perf_capabilities {
	struct {
		u64	lbr_format    : 6;
		u64	pebs_trap     : 1;
		u64	pebs_arch_reg : 1;
		u64	pebs_format   : 4;
		u64	smm_freeze    : 1;
	};
	u64	capabilities;
};
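
/*
 * A sketch of how the Intel-specific init code is expected to fill
 * this in (the bitfields mirror IA32_PERF_CAPABILITIES):
 *
 *	u64 capabilities;
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *	x86_pmu.intel_cap.capabilities = capabilities;
 *
 * e.g. intel_cap.pebs_format then selects the PEBS record layout.
 */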

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
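
/*
 * Intended indexing, illustratively: the raw event ID for L1D read
 * misses on the current model would be looked up as
 *
 *	u64 id = hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
 *
 * with 0 meaning 'not supported' and -1 meaning 'makes no sense on
 * this CPU', exactly as set_ext_hw_attr() below interprets it.
 */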

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
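
/*
 * Worked example of the shift trick above (illustrative): with
 * cntval_bits == 40, shift == 24. If the counter wrapped from
 * prev_raw_count == 0xffffffffff to new_raw_count == 0x05, then
 *
 *	(0x05 << 24) - (0xffffffffff << 24) == 0x0000000006000000
 *	0x0000000006000000 >> 24            == 6   (arithmetic shift)
 *
 * i.e. 6 events elapsed across the wrap, which a plain 64-bit
 * subtraction of the raw values would get wrong.
 */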

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
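
/*
 * The attr->config layout decoded above packs a cache event as
 * (type | (op << 8) | (result << 16)). For example, user-space asking
 * for L1D read misses would set up (a sketch, using the generic enums):
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */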

static int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else {
				err = reserve_ds_buffers();
				if (err)
					release_pmc_hardware();
			}
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}
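
/*
 * A sketch of the two-step dance above: if event E previously ran on
 * counter 2 and the new assignment moves it to counter 0 while a new
 * event F takes counter 2, step 1 stops E (draining its count off
 * counter 2) before step 2 reprograms E onto counter 0 and starts F
 * on counter 2. Doing both in a single pass could briefly leave one
 * counter programmed with two events.
 */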

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx,
			(u64)(-left) & x86_pmu.cntval_mask);

	perf_event_update_userpage(event);

	return ret;
}
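
/*
 * Worked example (illustrative): with a 48-bit counter and
 * left == 1000, the wrmsrl() above programs the counter to
 * (u64)-1000 & cntval_mask == 0xfffffffffc18, so the counter
 * overflows and raises the PMI after exactly 1000 increments.
 */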

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}
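
/*
 * Note on the unconditional NOTIFY_STOP above: once perf events are
 * active, the handler claims every PMU NMI. An unclaimed back-to-back
 * perf NMI would otherwise fall through to the unknown-NMI ("dazed
 * and confused") path, at the cost of possibly swallowing an
 * unrelated NMI while the PMU is in use.
 */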
1189
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001190static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1191 .notifier_call = perf_event_nmi_handler,
1192 .next = NULL,
1193 .priority = 1
1194};
1195
Peter Zijlstra63b14642010-01-22 16:32:17 +01001196static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001197static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001198
Peter Zijlstra63b14642010-01-22 16:32:17 +01001199static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001200x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001201{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001202 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001203
Stephane Eranian1da53e02010-01-18 10:58:01 +02001204 if (x86_pmu.event_constraints) {
1205 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001206 if ((event->hw.config & c->cmask) == c->code)
1207 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001208 }
1209 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001210
1211 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001212}
1213
Stephane Eranian1da53e02010-01-18 10:58:01 +02001214static int x86_event_sched_in(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001215 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001216{
1217 int ret = 0;
1218
1219 event->state = PERF_EVENT_STATE_ACTIVE;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001220 event->oncpu = smp_processor_id();
Stephane Eranian1da53e02010-01-18 10:58:01 +02001221 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
1222
1223 if (!is_x86_event(event))
1224 ret = event->pmu->enable(event);
1225
1226 if (!ret && !is_software_event(event))
1227 cpuctx->active_oncpu++;
1228
1229 if (!ret && event->attr.exclusive)
1230 cpuctx->exclusive = 1;
1231
1232 return ret;
1233}

static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 *
 * Called with the PMU disabled; if it returns 1, the caller is
 * guaranteed to then call perf_enable() and hw_perf_enable().
 */
int hw_perf_group_sched_in(struct perf_event *leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *sub;
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	if (!x86_pmu_initialized())
		return 0;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_pmu.schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

	ret = x86_event_sched_in(leader, cpuctx);
	if (ret)
		return ret;

	n1 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state > PERF_EVENT_STATE_OFF) {
			ret = x86_event_sched_in(sub, cpuctx);
			if (ret)
				goto undo;
			++n1;
		}
	}
	/*
	 * Copy the new assignment now that we know it is possible;
	 * it will be used by hw_perf_enable().
	 */
	memcpy(cpuc->assign, assign, n0*sizeof(int));

	cpuc->n_events  = n0;
	cpuc->n_added  += n1;
	ctx->nr_active += n1;

	/*
	 * A return value of 1 means the events are (logically) active.
	 * This is not quite true, since actual activation is deferred
	 * until hw_perf_enable(), but this way we ensure the caller
	 * won't try to enable the individual events.
	 */
	return 1;
undo:
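	/*
	 * Roll back: n0 is reused below to count how many events have
	 * been scheduled out; once it reaches n1 (the number scheduled
	 * in above), the remaining siblings were never activated.
	 */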
	x86_event_sched_out(leader, cpuctx);
	n0 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
			x86_event_sched_out(sub, cpuctx);
			if (++n0 == n1)
				break;
		}
	}
	return ret;
}

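/*
 * The model-specific drivers are #included as .c files so that they
 * can share the static helpers and data structures defined above.
 */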
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}
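#if 0
/*
 * Sketch only, never compiled: the shape of a model-specific
 * cpu_prepare callback as invoked above; all names are hypothetical.
 * Returning anything but NOTIFY_OK from CPU_UP_PREPARE cancels the
 * CPU bring-up.
 */
static void *example_scratch[NR_CPUS];

static int example_cpu_prepare(int cpu)
{
	void *buf;

	/* allocate scratch space on the CPU's own memory node */
	buf = kzalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
	if (!buf)
		return NOTIFY_BAD;

	example_scratch[cpu] = buf;
	return NOTIFY_OK;
}
#endif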

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.quirks)
		x86_pmu.quirks();

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
	perf_max_events = x86_pmu.num_counters;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
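	/*
	 * Worked example: with 4 generic and 3 fixed counters this
	 * yields ((1 << 4) - 1) | (((1LL << 3) - 1) << 32), i.e.
	 * intel_ctrl == 0x70000000f.
	 */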

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

	perf_cpu_notifier(x86_pmu_notifier);
}

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

/*
 * Validate that we can schedule this event, i.e. that its constraint
 * leaves it at least one counter to run on.
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		return -ENOMEM;

	c = x86_pmu.get_event_constraints(fake_cpuc, event);

	if (!c || !c->weight)
		ret = -ENOSPC;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	kfree(fake_cpuc);

	return ret;
}
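/*
 * Note that validate_event() checks the event against a pristine,
 * fake cpuc, so it answers "could this event ever be scheduled",
 * not "can it be scheduled right now".
 */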

/*
 * validate a single event group
 *
 * validation includes:
 *	- check that events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * The event is not yet connected with its siblings, so we must
	 * first collect the existing siblings and then add the new event
	 * before we can simulate the scheduling.
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

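	/*
	 * A NULL assignment array asks schedule_events() only whether a
	 * valid assignment exists; the assignment itself is thrown away.
	 */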
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * We temporarily connect the event to its pmu so that
		 * validate_group() can classify it as an x86 event
		 * using is_x86_event().
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
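/*
 * Separate buffers for IRQ and NMI context: an NMI can fire in the
 * middle of an IRQ-context callchain walk, so the two contexts must
 * not share an entry.
 */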

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	fp = compat_ptr(regs->bp);
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = 0;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (fp < compat_ptr(regs->sp))
			break;

		callchain_store(entry, frame.return_address);
		fp = compat_ptr(frame.next_frame);
	}
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif
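/*
 * For reference, the compat frame layout walked above is assumed to
 * follow the 32-bit ABI:
 *
 *	struct stack_frame_ia32 {
 *		u32 next_frame;
 *		u32 return_address;
 *	};
 */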

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	if (perf_callchain_user32(regs, entry))
		return;

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = NULL;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: guest OS callchains are not yet supported */
		return NULL;
	}

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;
	/*
	 * perf_arch_fetch_caller_regs() itself adds another call frame,
	 * so we need to increment the skip level.
	 */
	regs->bp = rewind_frame_pointer(skip + 1);
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		ip = perf_guest_cbs->get_guest_ip();
	else
		ip = instruction_pointer(regs);

	return ip;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT;

	return misc;
}