/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)


/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

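/*
 * Scheduling constraint for one event: the bitmask of counters the
 * event may run on, the event code/mask it applies to, and its
 * weight (number of usable counters) used by the scheduler below.
 */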
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64[1];
	};
	int	code;
	int	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64[0] = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
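	/*
	 * Example: with 40-bit counters, shift is 24.  Shifting both raw
	 * values up by 24 bits discards anything above the counter width
	 * before taking the difference, and the arithmetic shift back
	 * down sign-extends the result, so the delta is correct even
	 * though the counter was programmed with a negative (-left)
	 * value that the hardware truncates to the counter width.
	 */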
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provide the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

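/*
 * Assign the n collected events to hardware counters, honouring each
 * event's constraint mask.  Fills @assign with the chosen counter
 * indices when non-NULL; returns 0 on success or -ENOSPC if the set
 * of events cannot be scheduled.
 */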
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		constraints[i] =
		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

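			/*
			 * Find the first counter in this event's
			 * constraint mask that is still free.
			 */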
			for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

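/*
 * Program hwc with the counter chosen by the scheduler: record the
 * assignment (counter, CPU, tag) and pick the config/counter MSR base
 * for BTS, fixed-purpose or general-purpose counters.
 */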
static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

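/*
 * True if this event was last programmed on the same counter, on the
 * same CPU, and the assignment tag has not changed since, so
 * hw_perf_enable() can skip reprogramming it.
 */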
static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);

			hwc->idx = -1;
		}

		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (hwc->idx == -1) {
				x86_assign_hw_event(event, cpuc, i);
				x86_perf_event_set_period(event, hwc, hwc->idx);
			}
			/*
			 * need to mark as active because x86_pmu_disable()
			 * clears active_mask and events[] yet it preserves
			 * idx
			 */
			set_bit(hwc->idx, cpuc->active_mask);
			cpuc->events[hwc->idx] = event;

			x86_pmu.enable(hwc, hwc->idx);
			perf_event_update_userpage(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

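/*
 * Per-cpu record of the last period programmed into each counter,
 * used by perf_event_print_debug().
 */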
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

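	/*
	 * Write -left (truncated to the counter width) into the counter:
	 * it then overflows after 'left' more increments.
	 */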
	err = checking_wrmsrl(hwc->event_base + idx,
			     (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}

static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(hwc, idx);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added = n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event, hwc, hwc->idx);
	x86_pmu.enable(hwc, hwc->idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->events[hwc->idx] != event))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event, hwc, idx);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

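/*
 * Generic overflow handler: fold the delta of every active counter
 * into its event, and for counters that overflowed re-arm the period
 * and deliver the sample.
 */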
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
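		/*
		 * Counters count up from -left, so the sign bit still
		 * being set means this counter has not overflowed yet.
		 */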
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu.disable(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

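/*
 * Default constraint lookup: return the first model-specific
 * constraint whose code matches the event, or the catch-all
 * 'unconstrained' entry when none applies.
 */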
Peter Zijlstra63b14642010-01-22 16:32:17 +01001209static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001210x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001211{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001212 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001213
Stephane Eranian1da53e02010-01-18 10:58:01 +02001214 if (x86_pmu.event_constraints) {
1215 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001216 if ((event->hw.config & c->cmask) == c->code)
1217 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001218 }
1219 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001220
1221 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001222}
1223
Stephane Eranian1da53e02010-01-18 10:58:01 +02001224static int x86_event_sched_in(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001225 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001226{
1227 int ret = 0;
1228
1229 event->state = PERF_EVENT_STATE_ACTIVE;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001230 event->oncpu = smp_processor_id();
Stephane Eranian1da53e02010-01-18 10:58:01 +02001231 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
1232
1233 if (!is_x86_event(event))
1234 ret = event->pmu->enable(event);
1235
1236 if (!ret && !is_software_event(event))
1237 cpuctx->active_oncpu++;
1238
1239 if (!ret && event->attr.exclusive)
1240 cpuctx->exclusive = 1;
1241
1242 return ret;
1243}
1244
1245static void x86_event_sched_out(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001246 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001247{
1248 event->state = PERF_EVENT_STATE_INACTIVE;
1249 event->oncpu = -1;
1250
1251 if (!is_x86_event(event))
1252 event->pmu->disable(event);
1253
1254 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
1255
1256 if (!is_software_event(event))
1257 cpuctx->active_oncpu--;
1258
1259 if (event->attr.exclusive || !cpuctx->active_oncpu)
1260 cpuctx->exclusive = 0;
1261}
1262
1263/*
1264 * Called to enable a whole group of events.
1265 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1266 * Assumes the caller has disabled interrupts and has
1267 * frozen the PMU with hw_perf_save_disable.
1268 *
 1269	 * Called with the PMU disabled. If successful and it returns 1, the caller
 1270	 * is then guaranteed to call perf_enable() and hw_perf_enable().
1271 */
1272int hw_perf_group_sched_in(struct perf_event *leader,
1273 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001274 struct perf_event_context *ctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001275{
Peter Zijlstra6e377382010-02-11 13:21:58 +01001276 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001277 struct perf_event *sub;
1278 int assign[X86_PMC_IDX_MAX];
1279 int n0, n1, ret;
1280
1281 /* n0 = total number of events */
1282 n0 = collect_events(cpuc, leader, true);
1283 if (n0 < 0)
1284 return n0;
1285
1286 ret = x86_schedule_events(cpuc, n0, assign);
1287 if (ret)
1288 return ret;
1289
Peter Zijlstra6e377382010-02-11 13:21:58 +01001290 ret = x86_event_sched_in(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001291 if (ret)
1292 return ret;
1293
1294 n1 = 1;
1295 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02001296 if (sub->state > PERF_EVENT_STATE_OFF) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001297 ret = x86_event_sched_in(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001298 if (ret)
1299 goto undo;
1300 ++n1;
1301 }
1302 }
1303 /*
 1304	 * copy the new assignment now that we know it is possible;
 1305	 * it will be used by hw_perf_enable()
1306 */
1307 memcpy(cpuc->assign, assign, n0*sizeof(int));
1308
1309 cpuc->n_events = n0;
1310 cpuc->n_added = n1;
1311 ctx->nr_active += n1;
1312
1313 /*
 1314	 * A return value of 1 means successful and the events are active.
 1315	 * This is not quite true because we defer
 1316	 * actual activation until hw_perf_enable(), but
 1317	 * this way we ensure the caller won't try to enable
 1318	 * individual events.
1319 */
1320 return 1;
1321undo:
Peter Zijlstra6e377382010-02-11 13:21:58 +01001322 x86_event_sched_out(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001323 n0 = 1;
1324 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1325 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001326 x86_event_sched_out(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001327 if (++n0 == n1)
1328 break;
1329 }
1330 }
1331 return ret;
1332}
1333
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001334#include "perf_event_amd.c"
1335#include "perf_event_p6.c"
1336#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301337
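/*
 * Without a local APIC there is no PMU interrupt, so clear x86_pmu.apic
 * and note that hardware sampling will not be available.
 */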
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001338static void __init pmu_check_apic(void)
1339{
1340 if (cpu_has_apic)
1341 return;
1342
1343 x86_pmu.apic = 0;
1344 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1345 pr_info("no hardware sampling interrupt available.\n");
1346}
1347
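/*
 * Boot-time PMU setup: pick the vendor implementation, clip the counter
 * counts to the architectural maxima, build perf_event_mask, register the
 * NMI handler and report the detected configuration.
 */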
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001348void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301349{
Robert Richter72eae042009-04-29 12:47:10 +02001350 int err;
1351
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001352 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001353
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301354 switch (boot_cpu_data.x86_vendor) {
1355 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001356 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301357 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301358 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001359 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301360 break;
Robert Richter41389602009-04-29 12:47:00 +02001361 default:
1362 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301363 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001364 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001365 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301366 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001367 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301368
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001369 pmu_check_apic();
1370
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001371 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001372
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001373 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
1374 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1375 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
1376 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001377 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001378 perf_event_mask = (1 << x86_pmu.num_events) - 1;
1379 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01001380
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001381 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
1382 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1383 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
1384 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001385 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001386
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001387 perf_event_mask |=
1388 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
1389 x86_pmu.intel_ctrl = perf_event_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001390
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001391 perf_events_lapic_init();
1392 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001393
Peter Zijlstra63b14642010-01-22 16:32:17 +01001394 unconstrained = (struct event_constraint)
Peter Zijlstrafce877e2010-01-29 13:25:12 +01001395 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
1396 0, x86_pmu.num_events);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001397
Ingo Molnar57c0c152009-09-21 12:20:38 +02001398 pr_info("... version: %d\n", x86_pmu.version);
1399 pr_info("... bit width: %d\n", x86_pmu.event_bits);
1400 pr_info("... generic registers: %d\n", x86_pmu.num_events);
1401 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
1402 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1403 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
1404 pr_info("... event mask: %016Lx\n", perf_event_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001405}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001406
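/*
 * Reading an x86 event only requires folding the current hardware counter
 * value into the event count; x86_perf_event_update() does exactly that.
 */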
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001407static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001408{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001409 x86_perf_event_update(event, &event->hw, event->hw.idx);
Ingo Molnaree060942008-12-13 09:00:03 +01001410}
1411
Robert Richter4aeb0b42009-04-29 12:47:03 +02001412static const struct pmu pmu = {
1413 .enable = x86_pmu_enable,
1414 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001415 .start = x86_pmu_start,
1416 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001417 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001418 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001419};
1420
Stephane Eranian1da53e02010-01-18 10:58:01 +02001421/*
1422 * validate a single event group
1423 *
 1424	 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001425	 * - check events are compatible with each other
1426 * - events do not compete for the same counter
1427 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001428 *
1429 * validation ensures the group can be loaded onto the
1430 * PMU if it was the only group available.
1431 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001432static int validate_group(struct perf_event *event)
1433{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001434 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001435 struct cpu_hw_events *fake_cpuc;
1436 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001437
Peter Zijlstra502568d2010-01-22 14:35:46 +01001438 ret = -ENOMEM;
1439 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1440 if (!fake_cpuc)
1441 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001442
Stephane Eranian1da53e02010-01-18 10:58:01 +02001443 /*
 1444	 * the event is not yet connected with its
 1445	 * siblings; therefore we must first collect
 1446	 * the existing siblings, then add the new event
 1447	 * before we can simulate the scheduling
1448 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001449 ret = -ENOSPC;
1450 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001451 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001452 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001453
Peter Zijlstra502568d2010-01-22 14:35:46 +01001454 fake_cpuc->n_events = n;
1455 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001456 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001457 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001458
Peter Zijlstra502568d2010-01-22 14:35:46 +01001459 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001460
Peter Zijlstra502568d2010-01-22 14:35:46 +01001461 ret = x86_schedule_events(fake_cpuc, n, NULL);
1462
1463out_free:
1464 kfree(fake_cpuc);
1465out:
1466 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001467}
1468
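/*
 * Main entry point from the core perf code: set up the hardware event and,
 * for group members, validate that the whole group could still be
 * scheduled onto the PMU.
 */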
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001469const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001470{
Stephane Eranian81130702010-01-21 17:39:01 +02001471 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001472 int err;
1473
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001474 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001475 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001476 /*
1477 * we temporarily connect event to its pmu
1478 * such that validate_group() can classify
1479 * it as an x86 event using is_x86_event()
1480 */
1481 tmp = event->pmu;
1482 event->pmu = &pmu;
1483
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001484 if (event->group_leader != event)
1485 err = validate_group(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001486
1487 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001488 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001489 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001490 if (event->destroy)
1491 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001492 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001493 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001494
Robert Richter4aeb0b42009-04-29 12:47:03 +02001495 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001496}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001497
1498/*
1499 * callchain support
1500 */
1501
1502static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001503void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001504{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001505 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001506 entry->ip[entry->nr++] = ip;
1507}
1508
Tejun Heo245b2e72009-06-24 15:13:48 +09001509static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1510static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001511
1512
1513static void
1514backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1515{
1516 /* Ignore warnings */
1517}
1518
1519static void backtrace_warning(void *data, char *msg)
1520{
1521 /* Ignore warnings */
1522}
1523
1524static int backtrace_stack(void *data, char *name)
1525{
Ingo Molnar038e8362009-06-15 09:57:59 +02001526 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001527}
1528
1529static void backtrace_address(void *data, unsigned long addr, int reliable)
1530{
1531 struct perf_callchain_entry *entry = data;
1532
1533 if (reliable)
1534 callchain_store(entry, addr);
1535}
1536
1537static const struct stacktrace_ops backtrace_ops = {
1538 .warning = backtrace_warning,
1539 .warning_symbol = backtrace_warning_symbol,
1540 .stack = backtrace_stack,
1541 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001542 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001543};
1544
Ingo Molnar038e8362009-06-15 09:57:59 +02001545#include "../dumpstack.h"
1546
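/*
 * Record the kernel-side callchain: the context marker and regs->ip first,
 * then every reliable return address found by dump_trace().
 */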
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001547static void
1548perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1549{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001550 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001551 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001552
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001553 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001554}
1555
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001556/*
1557 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
1558 */
1559static unsigned long
1560copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001561{
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001562 unsigned long offset, addr = (unsigned long)from;
1563 int type = in_nmi() ? KM_NMI : KM_IRQ0;
1564 unsigned long size, len = 0;
1565 struct page *page;
1566 void *map;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001567 int ret;
1568
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001569 do {
1570 ret = __get_user_pages_fast(addr, 1, 0, &page);
1571 if (!ret)
1572 break;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001573
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001574 offset = addr & (PAGE_SIZE - 1);
1575 size = min(PAGE_SIZE - offset, n - len);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001576
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001577 map = kmap_atomic(page, type);
1578 memcpy(to, map+offset, size);
1579 kunmap_atomic(map, type);
1580 put_page(page);
1581
1582 len += size;
1583 to += size;
1584 addr += size;
1585
1586 } while (len < n);
1587
1588 return len;
1589}
1590
1591static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1592{
1593 unsigned long bytes;
1594
1595 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
1596
1597 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001598}
1599
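/*
 * Record the user-side callchain by walking the frame-pointer chain:
 * copy each stack_frame from user space (NMI-safe) and stop on a failed
 * copy, when the frame pointer drops below the stack pointer, or at
 * PERF_MAX_STACK_DEPTH entries.
 */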
1600static void
1601perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1602{
1603 struct stack_frame frame;
1604 const void __user *fp;
1605
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001606 if (!user_mode(regs))
1607 regs = task_pt_regs(current);
1608
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001609 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001610
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001611 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001612 callchain_store(entry, regs->ip);
1613
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001614 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02001615 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001616 frame.return_address = 0;
1617
1618 if (!copy_stack_frame(fp, &frame))
1619 break;
1620
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001621 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001622 break;
1623
1624 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001625 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001626 }
1627}
1628
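/*
 * Build the callchain for one sample: kernel frames when the sample hit
 * kernel mode, user frames whenever the interrupted task has an mm.
 */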
1629static void
1630perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1631{
1632 int is_user;
1633
1634 if (!regs)
1635 return;
1636
1637 is_user = user_mode(regs);
1638
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001639 if (is_user && current->state != TASK_RUNNING)
1640 return;
1641
1642 if (!is_user)
1643 perf_callchain_kernel(regs, entry);
1644
1645 if (current->mm)
1646 perf_callchain_user(regs, entry);
1647}
1648
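/*
 * Hand out the per-cpu callchain buffer matching the current context;
 * NMI and IRQ context use separate buffers so a PMU NMI arriving while an
 * IRQ-context callchain is being built does not clobber it.
 */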
1649struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1650{
1651 struct perf_callchain_entry *entry;
1652
1653 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001654 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001655 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001656 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001657
1658 entry->nr = 0;
1659
1660 perf_do_callchain(regs, entry);
1661
1662 return entry;
1663}
Markus Metzger30dd5682009-07-21 15:56:48 +02001664
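/*
 * CPU hotplug hooks: (re)initialize the debug store for the CPU and give
 * the vendor code (currently only AMD) a chance to set up or tear down
 * its per-cpu state.
 */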
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001665void hw_perf_event_setup_online(int cpu)
Markus Metzger30dd5682009-07-21 15:56:48 +02001666{
1667 init_debug_store_on_cpu(cpu);
Stephane Eranian38331f62010-02-08 17:17:01 +02001668
1669 switch (boot_cpu_data.x86_vendor) {
1670 case X86_VENDOR_AMD:
1671 amd_pmu_cpu_online(cpu);
1672 break;
1673 default:
1674 return;
1675 }
1676}
1677
1678void hw_perf_event_setup_offline(int cpu)
1679{
 1680	fini_debug_store_on_cpu(cpu);
1681
1682 switch (boot_cpu_data.x86_vendor) {
1683 case X86_VENDOR_AMD:
1684 amd_pmu_cpu_offline(cpu);
1685 break;
1686 default:
1687 return;
1688 }
Markus Metzger30dd5682009-07-21 15:56:48 +02001689}