#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_INFO,
};

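/*
 * Per-format decode flags: formats up to LBR_FORMAT_EIP carry no flag
 * bits, LBR_FORMAT_EIP_FLAGS keeps the mispredict bit in LBR_FROM,
 * LBR_FORMAT_EIP_FLAGS2 additionally keeps the TSX bits there, and
 * LBR_FORMAT_INFO moves all flags into MSR_LBR_INFO_n instead.
 */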
static enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * The following bit only exists in Linux; we mask it out before writing
 * it to the actual MSR. But it helps the constraint perf code to
 * understand that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x1ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

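/*
 * Flag bits recorded in the top bits of the LBR_FROM value on the
 * LBR_FORMAT_EIP_FLAGS/FLAGS2 formats; intel_pmu_lbr_read_64() strips
 * them out by sign extension after decoding them.
 */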
#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)
#define LBR_FROM_FLAG_IN_TX	(1ULL << 62)
#define LBR_FROM_FLAG_ABORT	(1ULL << 61)

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY		 \
	(X86_BR_CALL		|\
	 X86_BR_RET		|\
	 X86_BR_SYSCALL		|\
	 X86_BR_SYSRET		|\
	 X86_BR_INT		|\
	 X86_BR_IRET		|\
	 X86_BR_JCC		|\
	 X86_BR_JMP		|\
	 X86_BR_IRQ		|\
	 X86_BR_ABORT		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_IND_JMP		|\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, a PMI near call/return instructions
	 * may cause a superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}

enum {
	LBR_NONE,
	LBR_VALID,
};

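/*
 * Save/restore of the LBR stack across context switches, used by the
 * call-stack feature. The LBRs form a ring of x86_pmu.lbr_nr entries;
 * (tos - i) & mask walks backwards from the most recent entry, e.g. with
 * lbr_nr = 16 and tos = 2 the walk visits indices 2 and 1.
 * lbr_stack_state is set to LBR_VALID by the save path and consumed
 * (reset to LBR_NONE) by the restore path.
 */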
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	/*
	 * If the LBR callstack feature is enabled and the stack was saved
	 * when the task was scheduled out, restore the stack. Otherwise
	 * flush the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in) {
			__intel_pmu_lbr_restore(task_ctx);
			cpuc->lbr_context = ctx;
		} else {
			__intel_pmu_lbr_save(task_ctx);
		}
		return;
	}

	/*
	 * When sampling the branch stack in system-wide mode, it may be
	 * necessary to flush the stack on context switch. This happens
	 * when the branch stack does not tag its entries with the pid
	 * of the current task. Otherwise it becomes impossible to
	 * associate a branch entry with a task. This ambiguity is more
	 * likely to appear when the branch stack supports priv level
	 * filtering and the user sets it to monitor only at the user
	 * level (which could be a useful measurement in system-wide
	 * mode). In that case, the risk is high of having a branch
	 * stack with branches from multiple tasks.
	 */
	if (sched_in) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = ctx;
	}
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}
	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	cpuc->lbr_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);

	if (cpuc->enabled && !cpuc->lbr_users) {
		__intel_pmu_lbr_disable();
		/* avoid stale pointer */
		cpuc->lbr_context = NULL;
	}
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64 lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

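	/*
	 * In call-stack mode the LBRs act as a stack rather than a ring
	 * buffer, so only the entries up to TOS are valid; num = tos
	 * above limits the read accordingly.
	 */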
	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}
		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
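		/*
		 * Strip the flag bits decoded above: shifting left by
		 * "skip" and arithmetically shifting back sign-extends
		 * the remaining address bits, so canonical addresses
		 * survive unchanged.
		 */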
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	/*
	 * Stash the actual user request into reg; it may
	 * be used by fixup code for some CPUs.
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}

/*
 * Set up the HW LBR filter.
 * Used only when available; it may not be enough to disambiguate
 * all branches and may need the help of the SW filter.
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 */
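	/*
	 * Illustrative example (not part of the original source): a
	 * request for user-only conditional branches collects
	 * mask = LBR_USER | LBR_JCC = 0x006, so
	 * reg->config = 0x006 ^ 0x1ff = 0x1f9, i.e. capture at CPL > 0
	 * and conditional branches, suppress everything else.
	 */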
	reg->config = mask ^ x86_pmu.lbr_sel_mask;

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * Return the type of control flow change at address "from".
 * The instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * may be zero if the LBR did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
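		/* ModRM reg field selects the opcode extension (group 5) */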
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transitions) may
	 * occur on any instruction. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

/*
 * Implement the actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK samples may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
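			/*
			 * The entry shifted down into slot i may itself
			 * have from == 0; re-check before advancing.
			 */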
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr   = 4;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("4-deep LBR, ");
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr   = 16;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr   = 16;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr   = 16;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	pr_cont("16-deep LBR, ");
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr   = 32;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR, but that means far
	 *   jmps need to be filtered out
	 */
	pr_cont("32-deep LBR, ");
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr   = 8;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr   = 8;
	x86_pmu.lbr_tos  = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	pr_cont("8-deep LBR, ");
}