/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* Debug aid: flip "#if 0" to "#if 1" to trace all wrmsrl() calls. */
#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
			(unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION)
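
/*
 * Illustrative sketch (hypothetical helper, not part of this file's
 * API): an event whose sample_type requests nothing outside
 * PEBS_FREERUNNING_FLAGS could, in principle, be sampled without
 * taking a PMI per record.
 */
static inline bool pebs_could_freerun(struct perf_event *event)
{
	/* true iff every requested sample flag is PMI-free */
	return (event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS) == 0;
}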

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
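
/*
 * Illustrative sketch (hypothetical helper; the real setup lives in
 * the DS allocation code and differs in detail): the
 * *_interrupt_threshold fields mark the buffer fill level at which
 * the hardware raises an interrupt. For a PEBS buffer of @nr records
 * of @sz bytes each, starting at @base, one plausible layout is:
 */
static inline void ds_sketch_pebs_layout(struct debug_store *ds,
					 u64 base, int nr, int sz)
{
	ds->pebs_buffer_base	     = base;
	ds->pebs_index		     = base;	/* buffer starts empty */
	ds->pebs_absolute_maximum    = base + (u64)nr * sz;
	/* interrupt one record before the buffer overflows */
	ds->pebs_interrupt_threshold = base + (u64)(nr - 1) * sz;
}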

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};
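
/*
 * Illustrative sketch (hypothetical helper, heavily simplified from
 * the real constraint code): claiming a shared extra register means
 * "take it if it is free, or if it is already programmed with the
 * same config", bumping the reference count on success.
 */
static inline bool er_account_sketch_get(struct er_account *era, u64 config)
{
	bool ok;

	raw_spin_lock(&era->lock);
	ok = !atomic_read(&era->ref) || era->config == config;
	if (ok) {
		era->config = config;
		atomic_inc(&era->ref);
	}
	raw_spin_unlock(&era->lock);

	return ok;
}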

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
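
/*
 * For example (illustrative values), constraining event code 0x12 to
 * general-purpose counters 0 and 1 would be written as:
 *
 *	INTEL_EVENT_CONSTRAINT(0x12, 0x3)
 *
 * where the second argument is the bitmask of permitted counters.
 */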

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * A filter mask used to validate fixed counter events.
 * The following filters disqualify an event from using fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
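
/*
 * Fixed counter n is encoded as bit 32+n of the index mask. For
 * example, INST_RETIRED.ANY (architectural event 0x00c0) on fixed
 * counter 0:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 */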

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
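
/*
 * Illustrative sketch of walking a constraint table (hypothetical
 * helper; the real lookups also honour event flags and PEBS state):
 */
static inline struct event_constraint *
sketch_find_constraint(struct event_constraint *table, u64 config)
{
	struct event_constraint *c;

	for_each_event_constraint(c, table) {
		/* a constraint applies when the masked config matches */
		if ((config & c->cmask) == c->code)
			return c;
	}

	return NULL;
}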

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int	event;
	unsigned int	msr;
	u64		config_mask;
	u64		valid_mask;
	int		idx;  /* per_xxx->regs[] reg index */
	bool		extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
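
/*
 * A model's extra_regs table is terminated by EVENT_EXTRA_END, e.g.
 * (table name and valid mask are illustrative):
 *
 *	static struct extra_reg intel_foo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */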

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
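
/*
 * X86_CONFIG() assembles a raw config value from named bitfields
 * using designated initializers, e.g. (illustrative encoding):
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 *
 * Unnamed fields default to zero.
 */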

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
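
/*
 * Model-specific init code fills in and installs a static instance of
 * struct x86_pmu; an illustrative sketch of the shape (name and values
 * are hypothetical, the helpers are the ones declared below):
 *
 *	static __initconst const struct x86_pmu foo_pmu = {
 *		.name		= "foo",
 *		.handle_irq	= x86_pmu_handle_irq,
 *		.disable_all	= x86_pmu_disable_all,
 *		.enable_all	= x86_pmu_enable_all,
 *		.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
 *		.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
 *		.max_period	= (1ULL << 31) - 1,
 *	};
 */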

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
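
/*
 * Quirks are queued during PMU setup and run once during init, e.g.
 * from a model-specific branch (the quirk function is hypothetical):
 *
 *	x86_add_quirk(intel_foo_quirk);
 */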

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
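
/*
 * For example (the event string is illustrative), exposing a
 * "mem-loads" alias in sysfs:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */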

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per-model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

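/*
 * Model init code fills these tables; e.g. mapping L1D read accesses
 * to a raw event id (the id value is illustrative, model-specific):
 *
 *	hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = 0x0143;
 */
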
u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	/*
	 * On 64-bit, kernel addresses live in the upper canonical
	 * half, so the sign bit of the address is set.
	 */
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

/*
 * An event qualifies for BTS when it samples all branch instructions
 * with a fixed period of 1 and no frequency scaling.
 */
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */