/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

static u64 perf_event_mask __read_mostly;

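/*
 * Scheduling constraint for a single event: 'idxmsk' is the bitmask of
 * hardware counters the event may be placed on, 'code' and 'cmask' select
 * which bits of the event config must match for the constraint to apply,
 * and 'weight' caches the number of usable counters (set bits in idxmsk),
 * as computed by HWEIGHT() in the EVENT_CONSTRAINT() macro below.
 */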
struct event_constraint {
	union {
		unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64 idxmsk64;
	};
	u64 code;
	u64 cmask;
	int weight;
};

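/*
 * AMD northbridge performance counters are shared by all cores on the same
 * node, so counter ownership has to be arbitrated between CPUs; one amd_nb
 * structure is reference counted per northbridge (see the AMD-specific code
 * included at the end of this file).
 */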
struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long interrupts;
	int enabled;

	int n_events;
	int n_added;
	int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64 tags[X86_PMC_IDX_MAX];
	struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store *ds;
	u64 pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int lbr_users;
	void *lbr_context;
	struct perf_branch_stack lbr_stack;
	struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb *amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

/*
 * Constraint on the Event code + UMask + fixed-mask
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)

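/*
 * Decoded contents of the IA32_PERF_CAPABILITIES MSR, as filled in by the
 * Intel-specific setup code: LBR record format, PEBS trap/arch-reg/record
 * format capabilities and the SMM freeze bit.
 */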
union perf_capabilities {
	struct {
		u64 lbr_format : 6;
		u64 pebs_trap : 1;
		u64 pebs_arch_reg : 1;
		u64 pebs_format : 4;
		u64 smm_freeze : 1;
	};
	u64 capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char *name;
	int version;
	int (*handle_irq)(struct pt_regs *);
	void (*disable_all)(void);
	void (*enable_all)(void);
	void (*enable)(struct perf_event *);
	void (*disable)(struct perf_event *);
	unsigned eventsel;
	unsigned perfctr;
	u64 (*event_map)(int);
	u64 (*raw_event)(u64);
	int max_events;
	int num_events;
	int num_events_fixed;
	int event_bits;
	u64 event_mask;
	int apic;
	u64 max_period;
	struct event_constraint *
		(*get_event_constraints)(struct cpu_hw_events *cpuc,
					 struct perf_event *event);

	void (*put_event_constraints)(struct cpu_hw_events *cpuc,
				      struct perf_event *event);
	struct event_constraint *event_constraints;

	void (*cpu_prepare)(int cpu);
	void (*cpu_starting)(int cpu);
	void (*cpu_dying)(int cpu);
	void (*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64 intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int bts, pebs;
	int pebs_record_size;
	void (*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int lbr_nr; /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

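/*
 * For PERF_TYPE_HW_CACHE events, attr->config packs the cache descriptor as
 * three bytes: type | (op << 8) | (result << 16). For example (illustrative
 * only), an L1D read-miss would be encoded as
 *
 *	C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * and set_ext_hw_attr() below translates that into the model-specific raw
 * event from hw_cache_event_ids[][][].
 */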
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

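/*
 * Assign the n collected events to hardware counters, honouring each event's
 * constraint mask. First try the fast path: if every event can keep the
 * counter it used last time, no reprogramming is needed. Otherwise fall back
 * to a greedy pass that places the most constrained events (smallest weight,
 * i.e. fewest usable counters) first. Note this is a heuristic rather than a
 * full matching solver, so it may not find every feasible assignment.
 */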
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1 = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base = 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);

			hwc->idx = -1;
		}

		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (i < n_running &&
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			if (hwc->idx == -1)
				x86_assign_hw_event(event, cpuc, i);

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

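/*
 * x86_perf_event_set_period() below programs the counter with -(left),
 * truncated to the counter width by event_mask, so that after 'left'
 * increments the counter wraps and raises the PMI. As an illustrative
 * example only: with a period of 100000 on a 48-bit counter the value
 * written would be (u64)(-100000) & ((1ULL << 48) - 1); the real width
 * comes from x86_pmu.event_bits.
 */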
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			      (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call = perf_event_nmi_handler,
	.next = NULL,
	.priority = 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static int x86_event_sched_in(struct perf_event *event,
			      struct perf_cpu_context *cpuctx)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}

static void x86_event_sched_out(struct perf_event *event,
				struct perf_cpu_context *cpuctx)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

1230/*
1231 * Called to enable a whole group of events.
1232 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1233 * Assumes the caller has disabled interrupts and has
1234 * frozen the PMU with hw_perf_save_disable.
1235 *
 1236 * Called with the PMU disabled. If successful (return value 1), the
 1237 * caller is then guaranteed to call perf_enable() and hw_perf_enable().
1238 */
1239int hw_perf_group_sched_in(struct perf_event *leader,
1240 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001241 struct perf_event_context *ctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001242{
Peter Zijlstra6e377382010-02-11 13:21:58 +01001243 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001244 struct perf_event *sub;
1245 int assign[X86_PMC_IDX_MAX];
1246 int n0, n1, ret;
1247
1248 /* n0 = total number of events */
1249 n0 = collect_events(cpuc, leader, true);
1250 if (n0 < 0)
1251 return n0;
1252
1253 ret = x86_schedule_events(cpuc, n0, assign);
1254 if (ret)
1255 return ret;
1256
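	/*
	 * The whole group fits on the PMU: mark the leader and then every
	 * runnable sibling as active; any failure unwinds via 'undo' below.
	 */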
Peter Zijlstra6e377382010-02-11 13:21:58 +01001257 ret = x86_event_sched_in(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001258 if (ret)
1259 return ret;
1260
1261 n1 = 1;
1262 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02001263 if (sub->state > PERF_EVENT_STATE_OFF) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001264 ret = x86_event_sched_in(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001265 if (ret)
1266 goto undo;
1267 ++n1;
1268 }
1269 }
1270 /*
 1271 * copy the new assignment now that we know it is possible;
 1272 * it will be used by hw_perf_enable()
1273 */
1274 memcpy(cpuc->assign, assign, n0*sizeof(int));
1275
1276 cpuc->n_events = n0;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001277 cpuc->n_added += n1;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001278 ctx->nr_active += n1;
1279
1280 /*
1281 * 1 means successful and events are active
1282 * This is not quite true because we defer
 1283 * actual activation until hw_perf_enable(), but
 1284 * this way we ensure the caller won't try to enable
 1285 * individual events.
1286 */
1287 return 1;
1288undo:
Peter Zijlstra6e377382010-02-11 13:21:58 +01001289 x86_event_sched_out(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001290 n0 = 1;
1291 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1292 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001293 x86_event_sched_out(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001294 if (++n0 == n1)
1295 break;
1296 }
1297 }
1298 return ret;
1299}
1300
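/* vendor and model specific PMU drivers are built into this file: */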
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001301#include "perf_event_amd.c"
1302#include "perf_event_p6.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001303#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001304#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001305#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301306
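/*
 * CPU hotplug callback: forward each hotplug phase to the optional vendor
 * hooks in x86_pmu (cpu_prepare/cpu_starting/cpu_dying/cpu_dead), treating
 * the _FROZEN (suspend/resume) variants like their normal counterparts.
 */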
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001307static int __cpuinit
1308x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1309{
1310 unsigned int cpu = (long)hcpu;
1311
1312 switch (action & ~CPU_TASKS_FROZEN) {
1313 case CPU_UP_PREPARE:
1314 if (x86_pmu.cpu_prepare)
1315 x86_pmu.cpu_prepare(cpu);
1316 break;
1317
1318 case CPU_STARTING:
1319 if (x86_pmu.cpu_starting)
1320 x86_pmu.cpu_starting(cpu);
1321 break;
1322
1323 case CPU_DYING:
1324 if (x86_pmu.cpu_dying)
1325 x86_pmu.cpu_dying(cpu);
1326 break;
1327
1328 case CPU_DEAD:
1329 if (x86_pmu.cpu_dead)
1330 x86_pmu.cpu_dead(cpu);
1331 break;
1332
1333 default:
1334 break;
1335 }
1336
1337 return NOTIFY_OK;
1338}
1339
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001340static void __init pmu_check_apic(void)
1341{
1342 if (cpu_has_apic)
1343 return;
1344
1345 x86_pmu.apic = 0;
1346 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1347 pr_info("no hardware sampling interrupt available.\n");
1348}
1349
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001350void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301351{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001352 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001353 int err;
1354
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001355 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001356
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301357 switch (boot_cpu_data.x86_vendor) {
1358 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001359 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301360 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301361 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001362 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301363 break;
Robert Richter41389602009-04-29 12:47:00 +02001364 default:
1365 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301366 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001367 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001368 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301369 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001370 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301371
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001372 pmu_check_apic();
1373
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001374 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001375
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001376 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
1377 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1378 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
1379 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001380 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001381 perf_event_mask = (1 << x86_pmu.num_events) - 1;
1382 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01001383
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001384 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
1385 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1386 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
1387 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001388 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001389
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001390 perf_event_mask |=
1391 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
1392 x86_pmu.intel_ctrl = perf_event_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001393
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001394 perf_events_lapic_init();
1395 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001396
Peter Zijlstra63b14642010-01-22 16:32:17 +01001397 unconstrained = (struct event_constraint)
Peter Zijlstrafce877e2010-01-29 13:25:12 +01001398 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
1399 0, x86_pmu.num_events);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001400
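	/*
	 * Widen constraints that only name fixed counters so that their
	 * index mask also covers the generic counters; such events can then
	 * fall back to a generic counter when the fixed one is taken.
	 */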
Peter Zijlstrab622d642010-02-01 15:36:30 +01001401 if (x86_pmu.event_constraints) {
1402 for_each_event_constraint(c, x86_pmu.event_constraints) {
1403 if (c->cmask != INTEL_ARCH_FIXED_MASK)
1404 continue;
1405
1406 c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
1407 c->weight += x86_pmu.num_events;
1408 }
1409 }
1410
Ingo Molnar57c0c152009-09-21 12:20:38 +02001411 pr_info("... version: %d\n", x86_pmu.version);
1412 pr_info("... bit width: %d\n", x86_pmu.event_bits);
1413 pr_info("... generic registers: %d\n", x86_pmu.num_events);
1414 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
1415 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1416 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
1417 pr_info("... event mask: %016Lx\n", perf_event_mask);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001418
1419 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001420}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001421
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001422static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001423{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001424 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001425}
1426
Robert Richter4aeb0b42009-04-29 12:47:03 +02001427static const struct pmu pmu = {
1428 .enable = x86_pmu_enable,
1429 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001430 .start = x86_pmu_start,
1431 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001432 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001433 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001434};
1435
Stephane Eranian1da53e02010-01-18 10:58:01 +02001436/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001437 * validate that we can schedule this event
1438 */
1439static int validate_event(struct perf_event *event)
1440{
1441 struct cpu_hw_events *fake_cpuc;
1442 struct event_constraint *c;
1443 int ret = 0;
1444
1445 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1446 if (!fake_cpuc)
1447 return -ENOMEM;
1448
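	/*
	 * Evaluate the constraint against a zeroed, throw-away cpu context
	 * so the check does not disturb real per-cpu scheduling state.
	 */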
1449 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1450
1451 if (!c || !c->weight)
1452 ret = -ENOSPC;
1453
1454 if (x86_pmu.put_event_constraints)
1455 x86_pmu.put_event_constraints(fake_cpuc, event);
1456
1457 kfree(fake_cpuc);
1458
1459 return ret;
1460}
1461
1462/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001463 * validate a single event group
1464 *
 1465 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001466 * - check that events are compatible with each other
1467 * - events do not compete for the same counter
1468 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001469 *
1470 * validation ensures the group can be loaded onto the
1471 * PMU if it was the only group available.
1472 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001473static int validate_group(struct perf_event *event)
1474{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001475 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001476 struct cpu_hw_events *fake_cpuc;
1477 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001478
Peter Zijlstra502568d2010-01-22 14:35:46 +01001479 ret = -ENOMEM;
1480 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1481 if (!fake_cpuc)
1482 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001483
Stephane Eranian1da53e02010-01-18 10:58:01 +02001484 /*
1485 * the event is not yet connected with its
 1486 * siblings; therefore we must first collect
1487 * existing siblings, then add the new event
1488 * before we can simulate the scheduling
1489 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001490 ret = -ENOSPC;
1491 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001492 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001493 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001494
Peter Zijlstra502568d2010-01-22 14:35:46 +01001495 fake_cpuc->n_events = n;
1496 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001497 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001498 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001499
Peter Zijlstra502568d2010-01-22 14:35:46 +01001500 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001501
Peter Zijlstra502568d2010-01-22 14:35:46 +01001502 ret = x86_schedule_events(fake_cpuc, n, NULL);
1503
1504out_free:
1505 kfree(fake_cpuc);
1506out:
1507 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001508}
1509
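/*
 * Entry point used by the generic perf core: initialize the event,
 * validate that it (or its whole group) can be scheduled, and return
 * the x86 pmu on success.
 */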
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001510const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001511{
Stephane Eranian81130702010-01-21 17:39:01 +02001512 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001513 int err;
1514
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001515 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001516 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001517 /*
 1518 * we temporarily connect the event to its pmu
1519 * such that validate_group() can classify
1520 * it as an x86 event using is_x86_event()
1521 */
1522 tmp = event->pmu;
1523 event->pmu = &pmu;
1524
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001525 if (event->group_leader != event)
1526 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001527 else
1528 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001529
1530 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001531 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001532 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001533 if (event->destroy)
1534 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001535 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001536 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001537
Robert Richter4aeb0b42009-04-29 12:47:03 +02001538 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001539}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001540
1541/*
1542 * callchain support
1543 */
1544
1545static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001546void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001547{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001548 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001549 entry->ip[entry->nr++] = ip;
1550}
1551
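/*
 * Separate per-cpu callchain buffers for IRQ and NMI context, so an NMI
 * arriving during an IRQ-level unwind does not clobber the buffer that is
 * already in use.
 */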
Tejun Heo245b2e72009-06-24 15:13:48 +09001552static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1553static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001554
1555
1556static void
1557backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1558{
1559 /* Ignore warnings */
1560}
1561
1562static void backtrace_warning(void *data, char *msg)
1563{
1564 /* Ignore warnings */
1565}
1566
1567static int backtrace_stack(void *data, char *name)
1568{
Ingo Molnar038e8362009-06-15 09:57:59 +02001569 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001570}
1571
1572static void backtrace_address(void *data, unsigned long addr, int reliable)
1573{
1574 struct perf_callchain_entry *entry = data;
1575
1576 if (reliable)
1577 callchain_store(entry, addr);
1578}
1579
1580static const struct stacktrace_ops backtrace_ops = {
1581 .warning = backtrace_warning,
1582 .warning_symbol = backtrace_warning_symbol,
1583 .stack = backtrace_stack,
1584 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001585 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001586};
1587
Ingo Molnar038e8362009-06-15 09:57:59 +02001588#include "../dumpstack.h"
1589
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001590static void
1591perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1592{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001593 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001594 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001595
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001596 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001597}
1598
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001599static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1600{
1601 unsigned long bytes;
1602
1603 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
1604
1605 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001606}
1607
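/*
 * Walk the user stack by following saved frame pointers: copy each
 * struct stack_frame from user space (NMI-safe via copy_from_user_nmi()),
 * record the return address, and stop once the frame chain goes below the
 * stack pointer or the maximum depth is reached.
 */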
1608static void
1609perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1610{
1611 struct stack_frame frame;
1612 const void __user *fp;
1613
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001614 if (!user_mode(regs))
1615 regs = task_pt_regs(current);
1616
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001617 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001618
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001619 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001620 callchain_store(entry, regs->ip);
1621
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001622 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02001623 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001624 frame.return_address = 0;
1625
1626 if (!copy_stack_frame(fp, &frame))
1627 break;
1628
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001629 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001630 break;
1631
1632 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001633 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001634 }
1635}
1636
1637static void
1638perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1639{
1640 int is_user;
1641
1642 if (!regs)
1643 return;
1644
1645 is_user = user_mode(regs);
1646
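	/* don't touch the user stack of a task that isn't currently running */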
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001647 if (is_user && current->state != TASK_RUNNING)
1648 return;
1649
1650 if (!is_user)
1651 perf_callchain_kernel(regs, entry);
1652
1653 if (current->mm)
1654 perf_callchain_user(regs, entry);
1655}
1656
1657struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1658{
1659 struct perf_callchain_entry *entry;
1660
1661 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001662 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001663 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001664 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001665
1666 entry->nr = 0;
1667
1668 perf_do_callchain(regs, entry);
1669
1670 return entry;
1671}