/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE	= -1,	/* not used */

	EXTRA_REG_RSP_0	= 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1	= 1,	/* offcore_response_1 */
	EXTRA_REG_LBR	= 2,	/* lbr_select */
	EXTRA_REG_LDLAT	= 3,	/* ld_lat_threshold */
	EXTRA_REG_FE	= 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
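
/*
 * Illustrative note (not from the original source): because the subtraction
 * above is done in unsigned 64-bit arithmetic, any masked ecode below c->code
 * wraps around to a huge value and fails the comparison, so the expression is
 * a compact range check.  For a hypothetical range constraint with
 * code 0x40, size 0x0f and cmask ARCH_PERFMON_EVENTSEL_EVENT:
 *
 *	ecode 0x40 ... 0x4f  ->  (ecode - 0x40) <= 0x0f  ->  match
 *	ecode 0x3f           ->  wraps to ~0ULL          ->  no match
 *	ecode 0x50           ->  0x10 > 0x0f             ->  no match
 */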

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */

#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT	0x2000 /* Save/Restore MSR_LBR_SELECT */
#define PERF_X86_EVENT_TOPDOWN		0x4000 /* Count Topdown slots/metrics events */
#define PERF_X86_EVENT_PEBS_STLAT	0x8000 /* st+stlat data address sampling */

static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING)  &&
		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
	return is_metric_event(event) || is_slots_event(event);
}
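
/*
 * Illustrative note: the INTEL_TD_* encodings referenced above live in
 * <asm/perf_event.h>, not in this file.  Assuming the conventional values
 * there (TOPDOWN.SLOTS encoded as 0x0400 and the metric pseudo events
 * starting at 0x8000), a perf_event_attr::config of 0x0400 would make
 * is_slots_event() true, while 0x8000 (the retiring metric) would make
 * is_metric_event() true; both therefore count as topdown events.
 */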

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE)

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			n_txn_pair;
	int			n_txn_metric;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	union {
		struct er_account	*lbr_sel;
		struct er_account	*lbr_ctl;
	};
	u64				br_sel;
	void				*last_task_ctx;
	int				last_log_id;
	int				lbr_select;
	void				*lbr_xsave;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * Perf Metrics
	 */
	/* number of accepted metrics events */
	int				n_metric;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;
	int				n_pair; /* Large increment events */

	void				*kfree_on_online[X86_PERF_KFREE_MAX];

	struct pmu			*pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
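
/*
 * Illustrative note: a fixed counter constraint pins an event to one of the
 * fixed PMCs, whose scheduler indices start at bit 32 of the idxmsk.  For
 * example, on Intel CPUs INST_RETIRED.ANY (architectural encoding 0x00c0)
 * traditionally maps to fixed counter 0, which would be expressed roughly as
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0), i.e. idxmsk bit (32 + 0) set.
 */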

/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * The sharing between multiple users of the same metric without multiplexing
 * is not allowed, even though the hardware supports that in principle.
 */

#define METRIC_EVENT_CONSTRAINT(c, n)					\
	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
			 INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PSD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,		\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
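
/*
 * Illustrative note: a typical lookup walks a constraint table until the
 * EVENT_CONSTRAINT_END marker.  A sketch, not the actual implementation:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 */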

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
		u64	anythread_deprecated:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
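
/*
 * Illustrative note: X86_CONFIG() builds a raw PERFEVTSEL-style config value
 * from named bit fields.  For instance, a hypothetical event 0xc2 with umask
 * 0x01, counted with an inverted counter-mask of 16, could be written as:
 *
 *	u64 cfg = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
 */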

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

struct x86_hybrid_pmu {
	struct pmu			pmu;
	const char			*name;
	u8				cpu_type;
	cpumask_t			supported_cpus;
	union perf_capabilities		intel_cap;
	u64				intel_ctrl;
	int				max_pebs_events;
	int				num_counters;
	int				num_counters_fixed;
	struct event_constraint		unconstrained;

	u64				hw_cache_event_ids
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u64				hw_cache_extra_regs
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX];
	struct event_constraint		*event_constraints;
	struct event_constraint		*pebs_constraints;
	struct extra_reg		*extra_regs;

	unsigned int			late_ack	:1,
					mid_ack		:1,
					enabled_ack	:1;
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
	return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

extern struct static_key_false perf_is_hybrid;
#define is_hybrid()		static_branch_unlikely(&perf_is_hybrid)

#define hybrid(_pmu, _field)				\
(*({							\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;	\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
}))

#define hybrid_var(_pmu, _var)				\
(*({							\
	typeof(&_var) __Fp = &_var;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
							\
	__Fp;						\
}))

#define hybrid_bit(_pmu, _field)			\
({							\
	bool __Fp = x86_pmu._field;			\
							\
	if (is_hybrid() && (_pmu))			\
		__Fp = hybrid_pmu(_pmu)->_field;	\
							\
	__Fp;						\
})
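
/*
 * Illustrative note: these accessors transparently pick either the global
 * x86_pmu field or the per-PMU copy on hybrid systems.  A sketch of typical
 * use, assuming a struct cpu_hw_events *cpuc with a valid cpuc->pmu:
 *
 *	u64 ctrl = hybrid(cpuc->pmu, intel_ctrl);
 *	struct event_constraint *c = hybrid(cpuc->pmu, event_constraints);
 *
 * On a non-hybrid system both expressions fall back to the x86_pmu globals.
 */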

enum hybrid_pmu_type {
	hybrid_big		= 0x40,
	hybrid_small		= 0x20,

	hybrid_big_small	= hybrid_big | hybrid_small,
};

#define X86_HYBRID_PMU_ATOM_IDX		0
#define X86_HYBRID_PMU_CORE_IDX		1

#define X86_HYBRID_NUM_PMUS		2

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*assign)(struct perf_event *event, int idx);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			mid_ack			:1,
			enabled_ack		:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1,
			pebs_block		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned int	lbr_tos, lbr_from, lbr_to,
			lbr_info, lbr_nr;	   /* LBR base regs and size */
	union {
		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
	};
	union {
		const int	*lbr_sel_map;	   /* lbr_select mappings */
		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
	};
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel Architectural LBR CPUID Enumeration
	 */
	unsigned int	lbr_depth_mask:8;
	unsigned int	lbr_deep_c_reset:1;
	unsigned int	lbr_lip:1;
	unsigned int	lbr_cpl:1;
	unsigned int	lbr_filter:1;
	unsigned int	lbr_call_stack:1;
	unsigned int	lbr_mispred:1;
	unsigned int	lbr_timed_lbr:1;
	unsigned int	lbr_br_type:1;

	void		(*lbr_reset)(void);
	void		(*lbr_read)(struct cpu_hw_events *cpuc);
	void		(*lbr_save)(void *ctx);
	void		(*lbr_restore)(void *ctx);

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Intel perf metrics
	 */
	int		num_topdown_events;
	u64		(*update_topdown_event)(struct perf_event *event);
	int		(*set_topdown_event_period)(struct perf_event *event);

	/*
	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
	 * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct pmu::swap_task_ctx() usage for examples;
	 */
	void		(*swap_task_ctx)(struct perf_event_context *prev,
					 struct perf_event_context *next);

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;
	u64		perf_ctr_pair_en;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);

	int (*filter_match)(struct perf_event *event);
	/*
	 * Hybrid support
	 *
	 * Most PMU capabilities are the same among different hybrid PMUs.
	 * The global x86_pmu saves the architecture capabilities, which
	 * are available for all PMUs. The hybrid_pmu only includes the
	 * unique capabilities.
	 */
	int				num_hybrid_pmus;
	struct x86_hybrid_pmu		*hybrid_pmu;
	u8 (*get_hybrid_cpu_type)	(void);
};

struct x86_perf_task_context_opt {
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
	struct x86_perf_task_context_opt opt;
	struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may vary
 * based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
	struct x86_perf_task_context_opt		opt;

	union {
		struct xregs_state			xsave;
		struct {
			struct fxregs_state	i387;
			struct xstate_header	header;
			struct arch_lbr_state	lbr;
		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
	};
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
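
/*
 * Illustrative note: quirks queued with x86_add_quirk() are run once during
 * early PMU initialization.  A sketch of how a model-specific fixup might be
 * hooked up (the function name is hypothetical):
 *
 *	static __init void example_model_quirk(void)
 *	{
 *		// adjust x86_pmu fields for the affected model
 *	}
 *
 *	x86_add_quirk(example_model_quirk);	// from an __init path
 */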
| 967 | |
Stephane Eranian | 9a5e3fb | 2014-11-17 20:06:53 +0100 | [diff] [blame] | 968 | /* |
| 969 | * x86_pmu flags |
| 970 | */ |
| 971 | #define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */ |
| 972 | #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */ |
Maria Dimakopoulou | 6f6539c | 2014-11-17 20:06:57 +0100 | [diff] [blame] | 973 | #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ |
Stephane Eranian | b37609c | 2014-11-17 20:07:04 +0100 | [diff] [blame] | 974 | #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ |
Kan Liang | 3196234 | 2018-03-08 18:15:39 -0800 | [diff] [blame] | 975 | #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ |
Peter Zijlstra (Intel) | 400816f | 2019-03-05 22:23:18 +0100 | [diff] [blame] | 976 | #define PMU_FL_TFA 0x20 /* deal with TSX force abort */ |
Kim Phillips | 471af00 | 2019-11-14 12:37:19 -0600 | [diff] [blame] | 977 | #define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */ |
Kan Liang | 61b985e | 2021-01-28 14:40:10 -0800 | [diff] [blame] | 978 | #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ |
| 979 | #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 980 | |
Stephane Eranian | 3a54aaa | 2013-01-24 16:10:26 +0100 | [diff] [blame] | 981 | #define EVENT_VAR(_id) event_attr_##_id |
| 982 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr |
| 983 | |
| 984 | #define EVENT_ATTR(_name, _id) \ |
| 985 | static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ |
| 986 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ |
| 987 | .id = PERF_COUNT_HW_##_id, \ |
| 988 | .event_str = NULL, \ |
| 989 | }; |
| 990 | |
| 991 | #define EVENT_ATTR_STR(_name, v, str) \ |
| 992 | static struct perf_pmu_events_attr event_attr_##v = { \ |
| 993 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ |
| 994 | .id = 0, \ |
| 995 | .event_str = str, \ |
| 996 | }; |
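/*
 * Illustrative use of EVENT_ATTR_STR() (the event string below is only an
 * example, not something defined by this header):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which defines event_attr_mem_ld_nhm and is exported through
 * EVENT_PTR(mem_ld_nhm) in a PMU's sysfs attribute list.
 */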
| 997 | |
Andi Kleen | fc07e9f | 2016-05-19 17:09:56 -0700 | [diff] [blame] | 998 | #define EVENT_ATTR_STR_HT(_name, v, noht, ht) \ |
| 999 | static struct perf_pmu_events_ht_attr event_attr_##v = { \ |
| 1000 | .attr = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\ |
| 1001 | .id = 0, \ |
| 1002 | .event_str_noht = noht, \ |
| 1003 | .event_str_ht = ht, \ |
| 1004 | } |
| 1005 | |
Kan Liang | a9c81ccd | 2021-04-12 07:30:57 -0700 | [diff] [blame] | 1006 | #define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu) \ |
| 1007 | static struct perf_pmu_events_hybrid_attr event_attr_##v = { \ |
| 1008 | .attr = __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\ |
| 1009 | .id = 0, \ |
| 1010 | .event_str = str, \ |
| 1011 | .pmu_type = _pmu, \ |
| 1012 | } |
| 1013 | |
| 1014 | #define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr) |
| 1015 | |
| 1016 | #define FORMAT_ATTR_HYBRID(_name, _pmu) \ |
| 1017 | static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\ |
| 1018 | .attr = __ATTR_RO(_name), \ |
| 1019 | .pmu_type = _pmu, \ |
| 1020 | } |
| 1021 | |
Kan Liang | 61e76d5 | 2021-04-12 07:30:43 -0700 | [diff] [blame] | 1022 | struct pmu *x86_get_pmu(unsigned int cpu); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1023 | extern struct x86_pmu x86_pmu __read_mostly; |
| 1024 | |
Kan Liang | f42be86 | 2020-07-03 05:49:12 -0700 | [diff] [blame] | 1025 | static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) |
| 1026 | { |
Kan Liang | 47125db | 2020-07-03 05:49:20 -0700 | [diff] [blame] | 1027 | if (static_cpu_has(X86_FEATURE_ARCH_LBR)) |
| 1028 | return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt; |
| 1029 | |
Kan Liang | f42be86 | 2020-07-03 05:49:12 -0700 | [diff] [blame] | 1030 | return &((struct x86_perf_task_context *)ctx)->opt; |
| 1031 | } |
| 1032 | |
Yan, Zheng | e9d7f7cd | 2014-11-04 21:56:00 -0500 | [diff] [blame] | 1033 | static inline bool x86_pmu_has_lbr_callstack(void) |
| 1034 | { |
| 1035 | return x86_pmu.lbr_sel_map && |
| 1036 | x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; |
| 1037 | } |
| 1038 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1039 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
| 1040 | |
| 1041 | int x86_perf_event_set_period(struct perf_event *event); |
| 1042 | |
| 1043 | /* |
| 1044 | * Generalized hw caching related hw_event table, filled |
| 1045 | * in on a per-model basis. A value of 0 means |
| 1046 | * 'not supported', -1 means 'hw_event makes no sense on |
| 1047 | * this CPU', and any other value is the raw hw_event |
| 1048 | * ID. |
| 1049 | */ |
| 1050 | |
| 1051 | #define C(x) PERF_COUNT_HW_CACHE_##x |
| 1052 | |
| 1053 | extern u64 __read_mostly hw_cache_event_ids |
| 1054 | [PERF_COUNT_HW_CACHE_MAX] |
| 1055 | [PERF_COUNT_HW_CACHE_OP_MAX] |
| 1056 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
| 1057 | extern u64 __read_mostly hw_cache_extra_regs |
| 1058 | [PERF_COUNT_HW_CACHE_MAX] |
| 1059 | [PERF_COUNT_HW_CACHE_OP_MAX] |
| 1060 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
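/*
 * Illustrative indexing of the tables above via the C() shorthand; the
 * values are model specific and filled in by the vendor init code, and
 * the error returns below are only a sketch:
 *
 *	config = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)];
 *	if (config == 0)
 *		return -ENOENT;		... not supported ...
 *	if (config == -1ULL)
 *		return -EINVAL;		... makes no sense on this CPU ...
 */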
| 1061 | |
| 1062 | u64 x86_perf_event_update(struct perf_event *event); |
| 1063 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1064 | static inline unsigned int x86_pmu_config_addr(int index) |
| 1065 | { |
Jacob Shin | 4c1fd17 | 2013-02-06 11:26:27 -0600 | [diff] [blame] | 1066 | return x86_pmu.eventsel + (x86_pmu.addr_offset ? |
| 1067 | x86_pmu.addr_offset(index, true) : index); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1068 | } |
| 1069 | |
| 1070 | static inline unsigned int x86_pmu_event_addr(int index) |
| 1071 | { |
Jacob Shin | 4c1fd17 | 2013-02-06 11:26:27 -0600 | [diff] [blame] | 1072 | return x86_pmu.perfctr + (x86_pmu.addr_offset ? |
| 1073 | x86_pmu.addr_offset(index, false) : index); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1074 | } |
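/*
 * Illustrative only: the two helpers above map a counter index to its
 * config and counter MSRs, going through the vendor's addr_offset hook
 * when the MSRs are not laid out contiguously.  A counter can then be
 * programmed/read as:
 *
 *	wrmsrl(x86_pmu_config_addr(idx), config | ARCH_PERFMON_EVENTSEL_ENABLE);
 *	rdmsrl(x86_pmu_event_addr(idx), count);
 */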
| 1075 | |
Jacob Shin | 0fbdad0 | 2013-02-06 11:26:28 -0600 | [diff] [blame] | 1076 | static inline int x86_pmu_rdpmc_index(int index) |
| 1077 | { |
| 1078 | return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; |
| 1079 | } |
| 1080 | |
Kan Liang | fc4b8fc | 2021-04-12 07:30:45 -0700 | [diff] [blame] | 1081 | bool check_hw_exists(struct pmu *pmu, int num_counters, |
| 1082 | int num_counters_fixed); |
| 1083 | |
Alexander Shishkin | 4807034 | 2015-01-14 14:18:20 +0200 | [diff] [blame] | 1084 | int x86_add_exclusive(unsigned int what); |
| 1085 | |
| 1086 | void x86_del_exclusive(unsigned int what); |
| 1087 | |
Alexander Shishkin | 6b099d9 | 2015-06-11 15:13:56 +0300 | [diff] [blame] | 1088 | int x86_reserve_hardware(void); |
| 1089 | |
| 1090 | void x86_release_hardware(void); |
| 1091 | |
Andi Kleen | b00233b | 2017-08-22 11:52:01 -0700 | [diff] [blame] | 1092 | int x86_pmu_max_precise(void); |
| 1093 | |
Alexander Shishkin | 4807034 | 2015-01-14 14:18:20 +0200 | [diff] [blame] | 1094 | void hw_perf_lbr_event_destroy(struct perf_event *event); |
| 1095 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1096 | int x86_setup_perfctr(struct perf_event *event); |
| 1097 | |
| 1098 | int x86_pmu_hw_config(struct perf_event *event); |
| 1099 | |
| 1100 | void x86_pmu_disable_all(void); |
| 1101 | |
Kim Phillips | 5738891 | 2019-11-14 12:37:20 -0600 | [diff] [blame] | 1102 | static inline bool is_counter_pair(struct hw_perf_event *hwc) |
| 1103 | { |
| 1104 | return hwc->flags & PERF_X86_EVENT_PAIR; |
| 1105 | } |
| 1106 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1107 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, |
| 1108 | u64 enable_mask) |
| 1109 | { |
Joerg Roedel | 1018faa | 2012-02-29 14:57:32 +0100 | [diff] [blame] | 1110 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); |
| 1111 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1112 | if (hwc->extra_reg.reg) |
| 1113 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); |
Kim Phillips | 5738891 | 2019-11-14 12:37:20 -0600 | [diff] [blame] | 1114 | |
| 1115 | /* |
| 1116 | * If a large-increment (paired) event is being enabled on this |
| 1117 | * counter, also enable its Merge event on the next counter. |
| 1118 | */ |
| 1119 | if (is_counter_pair(hwc)) |
| 1120 | wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); |
| 1121 | |
Joerg Roedel | 1018faa | 2012-02-29 14:57:32 +0100 | [diff] [blame] | 1122 | wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1123 | } |
| 1124 | |
| 1125 | void x86_pmu_enable_all(int added); |
| 1126 | |
Peter Zijlstra | b371b59 | 2015-05-21 10:57:13 +0200 | [diff] [blame] | 1127 | int perf_assign_events(struct event_constraint **constraints, int n, |
Peter Zijlstra | cc1790c | 2015-05-21 10:57:17 +0200 | [diff] [blame] | 1128 | int wmin, int wmax, int gpmax, int *assign); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1129 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); |
| 1130 | |
| 1131 | void x86_pmu_stop(struct perf_event *event, int flags); |
| 1132 | |
| 1133 | static inline void x86_pmu_disable_event(struct perf_event *event) |
| 1134 | { |
Like Xu | df51fe7 | 2021-08-02 15:08:50 +0800 | [diff] [blame] | 1135 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1136 | struct hw_perf_event *hwc = &event->hw; |
| 1137 | |
Like Xu | df51fe7 | 2021-08-02 15:08:50 +0800 | [diff] [blame] | 1138 | wrmsrl(hwc->config_base, hwc->config & ~disable_mask); |
Kim Phillips | 5738891 | 2019-11-14 12:37:20 -0600 | [diff] [blame] | 1139 | |
| 1140 | if (is_counter_pair(hwc)) |
| 1141 | wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1142 | } |
| 1143 | |
| 1144 | void x86_pmu_enable_event(struct perf_event *event); |
| 1145 | |
| 1146 | int x86_pmu_handle_irq(struct pt_regs *regs); |
| 1147 | |
Kan Liang | e11c1a7 | 2021-04-12 07:30:55 -0700 | [diff] [blame] | 1148 | void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed, |
| 1149 | u64 intel_ctrl); |
| 1150 | |
Kan Liang | d9977c4 | 2021-04-12 07:30:56 -0700 | [diff] [blame] | 1151 | void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu); |
| 1152 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1153 | extern struct event_constraint emptyconstraint; |
| 1154 | |
| 1155 | extern struct event_constraint unconstrained; |
| 1156 | |
Stephane Eranian | 3e702ff | 2012-02-09 23:20:58 +0100 | [diff] [blame] | 1157 | static inline bool kernel_ip(unsigned long ip) |
| 1158 | { |
| 1159 | #ifdef CONFIG_X86_32 |
| 1160 | return ip > PAGE_OFFSET; |
| 1161 | #else |
| 1162 | return (long)ip < 0; |
| 1163 | #endif |
| 1164 | } |
| 1165 | |
Peter Zijlstra | d07bdfd | 2012-07-10 09:42:15 +0200 | [diff] [blame] | 1166 | /* |
| 1167 | * Not all PMUs provide the right context information to place the reported IP |
| 1168 | * into full context. Specifically, segment registers are typically not |
| 1169 | * supplied. |
| 1170 | * |
| 1171 | * Assuming the address is a linear address (it is for IBS), we fake the CS and |
| 1172 | * vm86 mode using the known zero-based code segment and 'fix up' the registers |
| 1173 | * to reflect this. |
| 1174 | * |
| 1175 | * Intel PEBS/LBR appear to typically provide the effective address; nothing |
| 1176 | * much we can do about that but pray and treat it like a linear address. |
| 1177 | */ |
| 1178 | static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) |
| 1179 | { |
| 1180 | regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS; |
| 1181 | if (regs->flags & X86_VM_MASK) |
| 1182 | regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); |
| 1183 | regs->ip = ip; |
| 1184 | } |
| 1185 | |
Jiri Olsa | 0bf79d4 | 2012-10-10 14:53:14 +0200 | [diff] [blame] | 1186 | ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); |
Jiri Olsa | 20550a4 | 2012-10-10 14:53:15 +0200 | [diff] [blame] | 1187 | ssize_t intel_event_sysfs_show(char *page, u64 config); |
Jiri Olsa | 43c032f | 2012-10-10 14:53:13 +0200 | [diff] [blame] | 1188 | |
Huang Rui | a49ac9f | 2016-03-25 11:18:25 +0800 | [diff] [blame] | 1189 | ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, |
| 1190 | char *page); |
Andi Kleen | fc07e9f | 2016-05-19 17:09:56 -0700 | [diff] [blame] | 1191 | ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr, |
| 1192 | char *page); |
Kan Liang | a9c81ccd | 2021-04-12 07:30:57 -0700 | [diff] [blame] | 1193 | ssize_t events_hybrid_sysfs_show(struct device *dev, |
| 1194 | struct device_attribute *attr, |
| 1195 | char *page); |
Huang Rui | a49ac9f | 2016-03-25 11:18:25 +0800 | [diff] [blame] | 1196 | |
Kan Liang | fc4b8fc | 2021-04-12 07:30:45 -0700 | [diff] [blame] | 1197 | static inline bool fixed_counter_disabled(int i, struct pmu *pmu) |
Kan Liang | 3245161 | 2021-01-28 14:40:11 -0800 | [diff] [blame] | 1198 | { |
Kan Liang | fc4b8fc | 2021-04-12 07:30:45 -0700 | [diff] [blame] | 1199 | u64 intel_ctrl = hybrid(pmu, intel_ctrl); |
| 1200 | |
| 1201 | return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED)); |
Kan Liang | 3245161 | 2021-01-28 14:40:11 -0800 | [diff] [blame] | 1202 | } |
| 1203 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1204 | #ifdef CONFIG_CPU_SUP_AMD |
| 1205 | |
| 1206 | int amd_pmu_init(void); |
| 1207 | |
| 1208 | #else /* CONFIG_CPU_SUP_AMD */ |
| 1209 | |
| 1210 | static inline int amd_pmu_init(void) |
| 1211 | { |
| 1212 | return 0; |
| 1213 | } |
| 1214 | |
| 1215 | #endif /* CONFIG_CPU_SUP_AMD */ |
| 1216 | |
Alexander Shishkin | 42880f7 | 2019-08-06 11:46:01 +0300 | [diff] [blame] | 1217 | static inline int is_pebs_pt(struct perf_event *event) |
| 1218 | { |
| 1219 | return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT); |
| 1220 | } |
| 1221 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1222 | #ifdef CONFIG_CPU_SUP_INTEL |
| 1223 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 1224 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
Alexander Shishkin | 4807034 | 2015-01-14 14:18:20 +0200 | [diff] [blame] | 1225 | { |
Jiri Olsa | 67266c1 | 2018-11-21 11:16:11 +0100 | [diff] [blame] | 1226 | struct hw_perf_event *hwc = &event->hw; |
| 1227 | unsigned int hw_event, bts_event; |
Alexander Shishkin | 4807034 | 2015-01-14 14:18:20 +0200 | [diff] [blame] | 1228 | |
Jiri Olsa | 67266c1 | 2018-11-21 11:16:11 +0100 | [diff] [blame] | 1229 | if (event->attr.freq) |
| 1230 | return false; |
| 1231 | |
| 1232 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
| 1233 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
| 1234 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 1235 | return hw_event == bts_event && period == 1; |
| 1236 | } |
| 1237 | |
| 1238 | static inline bool intel_pmu_has_bts(struct perf_event *event) |
| 1239 | { |
| 1240 | struct hw_perf_event *hwc = &event->hw; |
| 1241 | |
| 1242 | return intel_pmu_has_bts_period(event, hwc->sample_period); |
Alexander Shishkin | 4807034 | 2015-01-14 14:18:20 +0200 | [diff] [blame] | 1243 | } |
| 1244 | |
Song Liu | c22ac2a | 2021-09-10 11:33:50 -0700 | [diff] [blame] | 1245 | static __always_inline void __intel_pmu_pebs_disable_all(void) |
| 1246 | { |
| 1247 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
| 1248 | } |
| 1249 | |
| 1250 | static __always_inline void __intel_pmu_arch_lbr_disable(void) |
| 1251 | { |
| 1252 | wrmsrl(MSR_ARCH_LBR_CTL, 0); |
| 1253 | } |
| 1254 | |
| 1255 | static __always_inline void __intel_pmu_lbr_disable(void) |
| 1256 | { |
| 1257 | u64 debugctl; |
| 1258 | |
| 1259 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
| 1260 | debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); |
| 1261 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
| 1262 | } |
| 1263 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1264 | int intel_pmu_save_and_restart(struct perf_event *event); |
| 1265 | |
| 1266 | struct event_constraint * |
Stephane Eranian | 79cba82 | 2014-11-17 20:06:56 +0100 | [diff] [blame] | 1267 | x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
| 1268 | struct perf_event *event); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1269 | |
Peter Zijlstra (Intel) | d01b1f9 | 2019-03-05 22:23:15 +0100 | [diff] [blame] | 1270 | extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); |
| 1271 | extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1272 | |
| 1273 | int intel_pmu_init(void); |
| 1274 | |
| 1275 | void init_debug_store_on_cpu(int cpu); |
| 1276 | |
| 1277 | void fini_debug_store_on_cpu(int cpu); |
| 1278 | |
| 1279 | void release_ds_buffers(void); |
| 1280 | |
| 1281 | void reserve_ds_buffers(void); |
| 1282 | |
Kan Liang | c085fb8 | 2020-07-03 05:49:29 -0700 | [diff] [blame] | 1283 | void release_lbr_buffers(void); |
| 1284 | |
Like Xu | 488e13a | 2021-04-30 13:22:47 +0800 | [diff] [blame] | 1285 | void reserve_lbr_buffers(void); |
| 1286 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1287 | extern struct event_constraint bts_constraint; |
Like Xu | 097e431 | 2020-06-13 16:09:49 +0800 | [diff] [blame] | 1288 | extern struct event_constraint vlbr_constraint; |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1289 | |
| 1290 | void intel_pmu_enable_bts(u64 config); |
| 1291 | |
| 1292 | void intel_pmu_disable_bts(void); |
| 1293 | |
| 1294 | int intel_pmu_drain_bts_buffer(void); |
| 1295 | |
| 1296 | extern struct event_constraint intel_core2_pebs_event_constraints[]; |
| 1297 | |
| 1298 | extern struct event_constraint intel_atom_pebs_event_constraints[]; |
| 1299 | |
Yan, Zheng | 1fa6418 | 2013-07-18 17:02:24 +0800 | [diff] [blame] | 1300 | extern struct event_constraint intel_slm_pebs_event_constraints[]; |
| 1301 | |
Kan Liang | 8b92c3a | 2016-04-15 00:42:47 -0700 | [diff] [blame] | 1302 | extern struct event_constraint intel_glm_pebs_event_constraints[]; |
| 1303 | |
Kan Liang | dd0b06b | 2017-07-12 09:44:23 -0400 | [diff] [blame] | 1304 | extern struct event_constraint intel_glp_pebs_event_constraints[]; |
| 1305 | |
Kan Liang | f83d2f9 | 2021-04-12 07:31:00 -0700 | [diff] [blame] | 1306 | extern struct event_constraint intel_grt_pebs_event_constraints[]; |
| 1307 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1308 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; |
| 1309 | |
| 1310 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; |
| 1311 | |
| 1312 | extern struct event_constraint intel_snb_pebs_event_constraints[]; |
| 1313 | |
Stephane Eranian | 20a36e3 | 2012-09-11 01:07:01 +0200 | [diff] [blame] | 1314 | extern struct event_constraint intel_ivb_pebs_event_constraints[]; |
| 1315 | |
Andi Kleen | 3044318 | 2013-06-17 17:36:49 -0700 | [diff] [blame] | 1316 | extern struct event_constraint intel_hsw_pebs_event_constraints[]; |
| 1317 | |
Stephane Eranian | b3e6246 | 2016-03-03 20:50:42 +0100 | [diff] [blame] | 1318 | extern struct event_constraint intel_bdw_pebs_event_constraints[]; |
| 1319 | |
Andi Kleen | 9a92e16 | 2015-05-10 12:22:44 -0700 | [diff] [blame] | 1320 | extern struct event_constraint intel_skl_pebs_event_constraints[]; |
| 1321 | |
Kan Liang | 6017608 | 2019-04-02 12:45:05 -0700 | [diff] [blame] | 1322 | extern struct event_constraint intel_icl_pebs_event_constraints[]; |
| 1323 | |
Kan Liang | 61b985e | 2021-01-28 14:40:10 -0800 | [diff] [blame] | 1324 | extern struct event_constraint intel_spr_pebs_event_constraints[]; |
| 1325 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1326 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); |
| 1327 | |
Peter Zijlstra | 68f7082 | 2016-07-06 18:02:43 +0200 | [diff] [blame] | 1328 | void intel_pmu_pebs_add(struct perf_event *event); |
| 1329 | |
| 1330 | void intel_pmu_pebs_del(struct perf_event *event); |
| 1331 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1332 | void intel_pmu_pebs_enable(struct perf_event *event); |
| 1333 | |
| 1334 | void intel_pmu_pebs_disable(struct perf_event *event); |
| 1335 | |
| 1336 | void intel_pmu_pebs_enable_all(void); |
| 1337 | |
| 1338 | void intel_pmu_pebs_disable_all(void); |
| 1339 | |
Yan, Zheng | 9c964ef | 2015-05-06 15:33:51 -0400 | [diff] [blame] | 1340 | void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); |
| 1341 | |
Kan Liang | 5bee2cc | 2018-02-12 14:20:33 -0800 | [diff] [blame] | 1342 | void intel_pmu_auto_reload_read(struct perf_event *event); |
| 1343 | |
Kan Liang | 5624986 | 2020-07-03 05:49:16 -0700 | [diff] [blame] | 1344 | void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); |
Kan Liang | c22497f | 2019-04-02 12:45:02 -0700 | [diff] [blame] | 1345 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1346 | void intel_ds_init(void); |
| 1347 | |
Alexey Budankov | 421ca86 | 2019-10-23 10:12:54 +0300 | [diff] [blame] | 1348 | void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev, |
| 1349 | struct perf_event_context *next); |
| 1350 | |
Yan, Zheng | 2a0ad3b | 2014-11-04 21:55:59 -0500 | [diff] [blame] | 1351 | void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); |
| 1352 | |
David Carrillo-Cisneros | 19fc9dd | 2016-06-21 11:31:11 -0700 | [diff] [blame] | 1353 | u64 lbr_from_signext_quirk_wr(u64 val); |
| 1354 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1355 | void intel_pmu_lbr_reset(void); |
| 1356 | |
Kan Liang | 9f354a7 | 2020-07-03 05:49:08 -0700 | [diff] [blame] | 1357 | void intel_pmu_lbr_reset_32(void); |
| 1358 | |
| 1359 | void intel_pmu_lbr_reset_64(void); |
| 1360 | |
Peter Zijlstra | 68f7082 | 2016-07-06 18:02:43 +0200 | [diff] [blame] | 1361 | void intel_pmu_lbr_add(struct perf_event *event); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1362 | |
Peter Zijlstra | 68f7082 | 2016-07-06 18:02:43 +0200 | [diff] [blame] | 1363 | void intel_pmu_lbr_del(struct perf_event *event); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1364 | |
Andi Kleen | 1a78d93 | 2015-03-20 10:11:23 -0700 | [diff] [blame] | 1365 | void intel_pmu_lbr_enable_all(bool pmi); |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1366 | |
| 1367 | void intel_pmu_lbr_disable_all(void); |
| 1368 | |
| 1369 | void intel_pmu_lbr_read(void); |
| 1370 | |
Kan Liang | c301b1d | 2020-07-03 05:49:09 -0700 | [diff] [blame] | 1371 | void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc); |
| 1372 | |
| 1373 | void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc); |
| 1374 | |
Kan Liang | 799571b | 2020-07-03 05:49:10 -0700 | [diff] [blame] | 1375 | void intel_pmu_lbr_save(void *ctx); |
| 1376 | |
| 1377 | void intel_pmu_lbr_restore(void *ctx); |
| 1378 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1379 | void intel_pmu_lbr_init_core(void); |
| 1380 | |
| 1381 | void intel_pmu_lbr_init_nhm(void); |
| 1382 | |
| 1383 | void intel_pmu_lbr_init_atom(void); |
| 1384 | |
Kan Liang | f21d5ad | 2016-04-15 00:53:45 -0700 | [diff] [blame] | 1385 | void intel_pmu_lbr_init_slm(void); |
| 1386 | |
Stephane Eranian | c5cc2cd | 2012-02-09 23:20:55 +0100 | [diff] [blame] | 1387 | void intel_pmu_lbr_init_snb(void); |
| 1388 | |
Yan, Zheng | e9d7f7cd | 2014-11-04 21:56:00 -0500 | [diff] [blame] | 1389 | void intel_pmu_lbr_init_hsw(void); |
| 1390 | |
Andi Kleen | 9a92e16 | 2015-05-10 12:22:44 -0700 | [diff] [blame] | 1391 | void intel_pmu_lbr_init_skl(void); |
| 1392 | |
Harish Chegondi | 1e7b939 | 2015-12-07 14:28:18 -0800 | [diff] [blame] | 1393 | void intel_pmu_lbr_init_knl(void); |
| 1394 | |
Kan Liang | 47125db | 2020-07-03 05:49:20 -0700 | [diff] [blame] | 1395 | void intel_pmu_arch_lbr_init(void); |
| 1396 | |
Andi Kleen | e17dc65 | 2016-03-01 14:25:24 -0800 | [diff] [blame] | 1397 | void intel_pmu_pebs_data_source_nhm(void); |
| 1398 | |
Andi Kleen | 6ae5fa6 | 2017-08-16 15:21:54 -0700 | [diff] [blame] | 1399 | void intel_pmu_pebs_data_source_skl(bool pmem); |
| 1400 | |
Stephane Eranian | 60ce0fb | 2012-02-09 23:20:57 +0100 | [diff] [blame] | 1401 | int intel_pmu_setup_lbr_filter(struct perf_event *event); |
| 1402 | |
Alexander Shishkin | 52ca9ce | 2015-01-30 12:39:52 +0200 | [diff] [blame] | 1403 | void intel_pt_interrupt(void); |
| 1404 | |
Alexander Shishkin | 8062382 | 2015-01-30 12:40:35 +0200 | [diff] [blame] | 1405 | int intel_bts_interrupt(void); |
| 1406 | |
| 1407 | void intel_bts_enable_local(void); |
| 1408 | |
| 1409 | void intel_bts_disable_local(void); |
| 1410 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1411 | int p4_pmu_init(void); |
| 1412 | |
| 1413 | int p6_pmu_init(void); |
| 1414 | |
Vince Weaver | e717bf4 | 2012-09-26 14:12:52 -0400 | [diff] [blame] | 1415 | int knc_pmu_init(void); |
| 1416 | |
Stephane Eranian | b37609c | 2014-11-17 20:07:04 +0100 | [diff] [blame] | 1417 | static inline int is_ht_workaround_enabled(void) |
| 1418 | { |
| 1419 | return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); |
| 1420 | } |
Andi Kleen | 47732d8 | 2015-06-29 14:22:13 -0700 | [diff] [blame] | 1421 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1422 | #else /* CONFIG_CPU_SUP_INTEL */ |
| 1423 | |
| 1424 | static inline void reserve_ds_buffers(void) |
| 1425 | { |
| 1426 | } |
| 1427 | |
| 1428 | static inline void release_ds_buffers(void) |
| 1429 | { |
| 1430 | } |
| 1431 | |
Kan Liang | c085fb8 | 2020-07-03 05:49:29 -0700 | [diff] [blame] | 1432 | static inline void release_lbr_buffers(void) |
| 1433 | { |
| 1434 | } |
| 1435 | |
Like Xu | 488e13a | 2021-04-30 13:22:47 +0800 | [diff] [blame] | 1436 | static inline void reserve_lbr_buffers(void) |
| 1437 | { |
| 1438 | } |
| 1439 | |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1440 | static inline int intel_pmu_init(void) |
| 1441 | { |
| 1442 | return 0; |
| 1443 | } |
| 1444 | |
Peter Zijlstra | f764c58 | 2019-03-15 09:14:10 +0100 | [diff] [blame] | 1445 | static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1446 | { |
Peter Zijlstra (Intel) | d01b1f9 | 2019-03-05 22:23:15 +0100 | [diff] [blame] | 1447 | return 0; |
| 1448 | } |
| 1449 | |
Peter Zijlstra | f764c58 | 2019-03-15 09:14:10 +0100 | [diff] [blame] | 1450 | static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc) |
Peter Zijlstra (Intel) | d01b1f9 | 2019-03-05 22:23:15 +0100 | [diff] [blame] | 1451 | { |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1452 | } |
| 1453 | |
Peter Zijlstra | cc1790c | 2015-05-21 10:57:17 +0200 | [diff] [blame] | 1454 | static inline int is_ht_workaround_enabled(void) |
| 1455 | { |
| 1456 | return 0; |
| 1457 | } |
Kevin Winchester | de0428a | 2011-08-30 20:41:05 -0300 | [diff] [blame] | 1458 | #endif /* CONFIG_CPU_SUP_INTEL */ |
CodyYao-oc | 3a4ac12 | 2020-04-13 11:14:29 +0800 | [diff] [blame] | 1459 | |
| 1460 | #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN)) |
| 1461 | int zhaoxin_pmu_init(void); |
| 1462 | #else |
| 1463 | static inline int zhaoxin_pmu_init(void) |
| 1464 | { |
| 1465 | return 0; |
| 1466 | } |
| 1467 | #endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */