// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
        LBR_FORMAT_EIP_FLAGS2   = 0x04,
        LBR_FORMAT_INFO         = 0x05,
        LBR_FORMAT_TIME         = 0x06,
        LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
};

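/*
 * lbr_desc[] records, per LBR format, which extra flag bits are encoded
 * in the top bits of MSR_LAST_BRANCH_FROM_x: the mispredict flag
 * (LBR_EIP_FLAGS) and the TSX in_tx/abort flags (LBR_TSX).
 */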
static const enum {
        LBR_EIP_FLAGS           = 1,
        LBR_TSX                 = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
        [LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
        [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT          0 /* do not capture at ring0 */
#define LBR_USER_BIT            1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT             2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT        3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT        4 /* do not capture indirect calls */
#define LBR_RETURN_BIT          5 /* do not capture near returns */
#define LBR_IND_JMP_BIT         6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT         7 /* do not capture relative jumps */
#define LBR_FAR_BIT             8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT      9 /* enable call stack */

/*
 * The following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the perf constraint code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT        63 /* don't read LBR_INFO. */

#define LBR_KERNEL      (1 << LBR_KERNEL_BIT)
#define LBR_USER        (1 << LBR_USER_BIT)
#define LBR_JCC         (1 << LBR_JCC_BIT)
#define LBR_REL_CALL    (1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL    (1 << LBR_IND_CALL_BIT)
#define LBR_RETURN      (1 << LBR_RETURN_BIT)
#define LBR_REL_JMP     (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP     (1 << LBR_IND_JMP_BIT)
#define LBR_FAR         (1 << LBR_FAR_BIT)
#define LBR_CALL_STACK  (1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO     (1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK    0x3ff   /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP    -1      /* LBR filter not supported */
#define LBR_IGN         0       /* ignored */

#define LBR_ANY          \
        (LBR_JCC        |\
         LBR_REL_CALL   |\
         LBR_IND_CALL   |\
         LBR_RETURN     |\
         LBR_REL_JMP    |\
         LBR_IND_JMP    |\
         LBR_FAR)

#define LBR_FROM_FLAG_MISPRED   BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX     BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT     BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB   (BIT_ULL(60) | BIT_ULL(59))

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
        X86_BR_NONE             = 0,      /* unknown */

        X86_BR_USER             = 1 << 0, /* branch target is user */
        X86_BR_KERNEL           = 1 << 1, /* branch target is kernel */

        X86_BR_CALL             = 1 << 2, /* call */
        X86_BR_RET              = 1 << 3, /* return */
        X86_BR_SYSCALL          = 1 << 4, /* syscall */
        X86_BR_SYSRET           = 1 << 5, /* syscall return */
        X86_BR_INT              = 1 << 6, /* sw interrupt */
        X86_BR_IRET             = 1 << 7, /* return from interrupt */
        X86_BR_JCC              = 1 << 8, /* conditional */
        X86_BR_JMP              = 1 << 9, /* jump */
        X86_BR_IRQ              = 1 << 10,/* hw interrupt or trap or fault */
        X86_BR_IND_CALL         = 1 << 11,/* indirect calls */
        X86_BR_ABORT            = 1 << 12,/* transaction abort */
        X86_BR_IN_TX            = 1 << 13,/* in transaction */
        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
        X86_BR_IND_JMP          = 1 << 17,/* indirect jump */

        X86_BR_TYPE_SAVE        = 1 << 18,/* indicate to save branch type */

};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
        (X86_BR_CALL    |\
         X86_BR_RET     |\
         X86_BR_SYSCALL |\
         X86_BR_SYSRET  |\
         X86_BR_INT     |\
         X86_BR_IRET    |\
         X86_BR_JCC     |\
         X86_BR_JMP     |\
         X86_BR_IRQ     |\
         X86_BR_ABORT   |\
         X86_BR_IND_CALL |\
         X86_BR_IND_JMP |\
         X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL          \
        (X86_BR_CALL            |\
         X86_BR_IND_CALL        |\
         X86_BR_ZERO_CALL       |\
         X86_BR_SYSCALL         |\
         X86_BR_IRQ             |\
         X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes nearly impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        u64 debugctl, lbr_select = 0, orig_debugctl;

        /*
         * No need to unfreeze manually, as v4 can do that as part
         * of the GLOBAL_STATUS ack.
         */
        if (pmi && x86_pmu.version >= 4)
                return;

        /*
         * No need to reprogram LBR_SELECT in a PMI, as it
         * did not change.
         */
        if (cpuc->lbr_sel)
                lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
        if (!pmi && cpuc->lbr_sel)
                wrmsrl(MSR_LBR_SELECT, lbr_select);

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        orig_debugctl = debugctl;
        debugctl |= DEBUGCTLMSR_LBR;
        /*
         * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
         * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
         * may cause superfluous increase/decrease of LBR_TOS.
         */
        if (!(lbr_select & LBR_CALL_STACK))
                debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
        if (orig_debugctl != debugctl)
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
        u64 debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++)
                wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                wrmsrl(x86_pmu.lbr_from + i, 0);
                wrmsrl(x86_pmu.lbr_to + i, 0);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + i, 0);
        }
}

void intel_pmu_lbr_reset(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();

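        /*
         * Clear the context-switch tracking state so that a later
         * __intel_pmu_lbr_restore() cannot wrongly skip the restore.
         */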
        cpuc->last_task_ctx = NULL;
        cpuc->last_log_id = 0;
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
        u64 tos;

        rdmsrl(x86_pmu.lbr_tos, tos);
        return tos;
}

enum {
        LBR_NONE,
        LBR_VALID,
};

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61-bit sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
                           boot_cpu_has(X86_FEATURE_RTM);

        return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
        if (static_branch_unlikely(&lbr_from_quirk_key)) {
                /*
                 * Sign extend into bits 61:62 while preserving bit 63.
                 *
                 * Quirk is enabled when TSX is disabled. Therefore TSX
                 * bits in val are always OFF and must be changed to be
                 * sign extension bits. Since bits 59:60 are guaranteed
                 * to be part of the sign extension bits, we can just
                 * copy them to 61:62.
                 */
                val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
        }
        return val;
}

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
        if (static_branch_unlikely(&lbr_from_quirk_key)) {
                /*
                 * Quirk is on when TSX is not enabled. Therefore TSX
                 * flags must be read as OFF.
                 */
                val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
        }
        return val;
}

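/*
 * MSR accessors for the from/to LBR stacks. Only the FROM MSRs carry
 * flag bits in their top bits, so only those go through the
 * sign-extension quirk handlers above.
 */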
static inline void wrlbr_from(unsigned int idx, u64 val)
{
        val = lbr_from_signext_quirk_wr(val);
        wrmsrl(x86_pmu.lbr_from + idx, val);
}

static inline void wrlbr_to(unsigned int idx, u64 val)
{
        wrmsrl(x86_pmu.lbr_to + idx, val);
}

static inline u64 rdlbr_from(unsigned int idx)
{
        u64 val;

        rdmsrl(x86_pmu.lbr_from + idx, val);

        return lbr_from_signext_quirk_rd(val);
}

static inline u64 rdlbr_to(unsigned int idx)
{
        u64 val;

        rdmsrl(x86_pmu.lbr_to + idx, val);

        return val;
}

static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;
        unsigned lbr_idx, mask;
        u64 tos;

        if (task_ctx->lbr_callstack_users == 0 ||
            task_ctx->lbr_stack_state == LBR_NONE) {
                intel_pmu_lbr_reset();
                return;
        }

        tos = task_ctx->tos;
        /*
         * Do not restore the LBR registers if
         * - no one else touched them, and
         * - the core did not enter C6 (which would clear them)
         */
        if ((task_ctx == cpuc->last_task_ctx) &&
            (task_ctx->log_id == cpuc->last_log_id) &&
            rdlbr_from(tos)) {
                task_ctx->lbr_stack_state = LBR_NONE;
                return;
        }

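        /* lbr_nr is always a power of two, so this mask wraps the LBR ring. */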
        mask = x86_pmu.lbr_nr - 1;
        for (i = 0; i < task_ctx->valid_lbrs; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
                wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);

                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }

        for (; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_from(lbr_idx, 0);
                wrlbr_to(lbr_idx, 0);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
        }

        wrmsrl(x86_pmu.lbr_tos, tos);
        task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned lbr_idx, mask;
        u64 tos, from;
        int i;

        if (task_ctx->lbr_callstack_users == 0) {
                task_ctx->lbr_stack_state = LBR_NONE;
                return;
        }

        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                from = rdlbr_from(lbr_idx);
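                /* A zeroed FROM marks the end of the valid call-stack entries. */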
                if (!from)
                        break;
                task_ctx->lbr_from[i] = from;
                task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
        task_ctx->valid_lbrs = i;
        task_ctx->tos = tos;
        task_ctx->lbr_stack_state = LBR_VALID;

        cpuc->last_task_ctx = task_ctx;
        cpuc->last_log_id = ++task_ctx->log_id;
}

void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
                                 struct perf_event_context *next)
{
        struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;

        swap(prev->task_ctx_data, next->task_ctx_data);

        /*
         * Architecture specific synchronization makes sense in
         * case both prev->task_ctx_data and next->task_ctx_data
         * pointers are allocated.
         */

        prev_ctx_data = next->task_ctx_data;
        next_ctx_data = prev->task_ctx_data;

        if (!prev_ctx_data || !next_ctx_data)
                return;

        swap(prev_ctx_data->lbr_callstack_users,
             next_ctx_data->lbr_callstack_users);
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!cpuc->lbr_users)
                return;

        /*
         * If LBR callstack feature is enabled and the stack was saved when
         * the task was scheduled out, restore the stack. Otherwise flush
         * the LBR stack.
         */
        task_ctx = ctx ? ctx->task_ctx_data : NULL;
        if (task_ctx) {
                if (sched_in)
                        __intel_pmu_lbr_restore(task_ctx);
                else
                        __intel_pmu_lbr_save(task_ctx);
                return;
        }

        /*
         * Since a context switch can flip the address space and LBR entries
         * are not tagged with an identifier, we need to wipe the LBR, even for
         * per-cpu events. You simply cannot resolve the branches from the old
         * address space.
         */
        if (sched_in)
                intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
        return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        cpuc->br_sel = event->hw.branch_reg.reg;

        if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users++;
        }

        /*
         * Request pmu::sched_task() callback, which will fire inside the
         * regular perf event scheduling, so that call will:
         *
         *  - restore or wipe; when LBR-callstack,
         *  - wipe; otherwise,
         *
         * when this is from __perf_event_task_sched_in().
         *
         * However, if this is from perf_install_in_context(), no such callback
         * will follow and we'll need to reset the LBR here if this is the
         * first LBR event.
         *
         * The problem is, we cannot tell these cases apart... but we can
         * exclude the biggest chunk of cases by looking at
         * event->total_time_running. An event that has accrued runtime cannot
         * be 'new'. Conversely, a new event can get installed through the
         * context switch path for the first time.
         */
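        /* Adaptive PEBS records LBRs itself; count those users separately. */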
        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users++;
        perf_sched_cb_inc(event->ctx->pmu);
        if (!cpuc->lbr_users++ && !event->total_time_running)
                intel_pmu_lbr_reset();
}

void intel_pmu_lbr_del(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!x86_pmu.lbr_nr)
                return;

        if (branch_user_callstack(cpuc->br_sel) &&
            event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users--;
        }

        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users--;
        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
        WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
        perf_sched_cb_dec(event->ctx->pmu);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
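                /* The 32-bit format packs FROM/TO into one 64-bit MSR. */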
                union {
                        struct {
                                u32 from;
                                u32 to;
                        };
                        u64     lbr;
                } msr_lastbranch;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

                cpuc->lbr_entries[i].from       = msr_lastbranch.from;
                cpuc->lbr_entries[i].to         = msr_lastbranch.to;
                cpuc->lbr_entries[i].mispred    = 0;
                cpuc->lbr_entries[i].predicted  = 0;
                cpuc->lbr_entries[i].in_tx      = 0;
                cpuc->lbr_entries[i].abort      = 0;
                cpuc->lbr_entries[i].cycles     = 0;
                cpuc->lbr_entries[i].type       = 0;
                cpuc->lbr_entries[i].reserved   = 0;
        }
        cpuc->lbr_stack.nr = i;
        cpuc->lbr_stack.hw_idx = tos;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
        bool need_info = false, call_stack = false;
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
        int i;
        int out = 0;
        int num = x86_pmu.lbr_nr;

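        /* need_info: whether to read the extra LBR_INFO MSRs (skipped with LBR_NO_INFO). */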
        if (cpuc->lbr_sel) {
                need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
                if (cpuc->lbr_sel->config & LBR_CALL_STACK)
                        call_stack = true;
        }

        for (i = 0; i < num; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
                int skip = 0;
                u16 cycles = 0;
                int lbr_flags = lbr_desc[lbr_format];

                from = rdlbr_from(lbr_idx);
                to   = rdlbr_to(lbr_idx);

                /*
                 * Read LBR call stack entries
                 * until invalid entry (0s) is detected.
                 */
                if (call_stack && !from)
                        break;

                if (lbr_format == LBR_FORMAT_INFO && need_info) {
                        u64 info;

                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
                        mis = !!(info & LBR_INFO_MISPRED);
                        pred = !mis;
                        in_tx = !!(info & LBR_INFO_IN_TX);
                        abort = !!(info & LBR_INFO_ABORT);
                        cycles = (info & LBR_INFO_CYCLES);
                }

                if (lbr_format == LBR_FORMAT_TIME) {
                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
                        pred = !mis;
                        skip = 1;
                        cycles = ((to >> 48) & LBR_INFO_CYCLES);

                        to = (u64)((((s64)to) << 16) >> 16);
                }

                if (lbr_flags & LBR_EIP_FLAGS) {
                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
                        pred = !mis;
                        skip = 1;
                }
                if (lbr_flags & LBR_TSX) {
                        in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
                        abort = !!(from & LBR_FROM_FLAG_ABORT);
                        skip = 3;
                }
                from = (u64)((((s64)from) << skip) >> skip);

                /*
                 * Some CPUs report duplicated abort records,
                 * with the second entry not having an abort bit set.
                 * Skip them here. This loop runs backwards,
                 * so we need to undo the previous record.
                 * If the abort just happened outside the window,
                 * the extra entry cannot be removed.
                 */
                if (abort && x86_pmu.lbr_double_abort && out > 0)
                        out--;

                cpuc->lbr_entries[out].from      = from;
                cpuc->lbr_entries[out].to        = to;
                cpuc->lbr_entries[out].mispred   = mis;
                cpuc->lbr_entries[out].predicted = pred;
                cpuc->lbr_entries[out].in_tx     = in_tx;
                cpuc->lbr_entries[out].abort     = abort;
                cpuc->lbr_entries[out].cycles    = cycles;
                cpuc->lbr_entries[out].type      = 0;
                cpuc->lbr_entries[out].reserved  = 0;
                out++;
        }
        cpuc->lbr_stack.nr = out;
        cpuc->lbr_stack.hw_idx = tos;
}

void intel_pmu_lbr_read(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * Don't read when all LBR users are using adaptive PEBS.
         *
         * This could be smarter and actually check the event,
         * but this simple approach seems to work for now.
         */
        if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_read_32(cpuc);
        else
                intel_pmu_lbr_read_64(cpuc);

        intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
        u64 br_type = event->attr.branch_sample_type;
        int mask = 0;

        if (br_type & PERF_SAMPLE_BRANCH_USER)
                mask |= X86_BR_USER;

        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
                mask |= X86_BR_KERNEL;

        /* we ignore BRANCH_HV here */

        if (br_type & PERF_SAMPLE_BRANCH_ANY)
                mask |= X86_BR_ANY;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                mask |= X86_BR_ANY_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
                mask |= X86_BR_IND_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
                mask |= X86_BR_ABORT;

        if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
                mask |= X86_BR_IN_TX;

        if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
                mask |= X86_BR_NO_TX;

        if (br_type & PERF_SAMPLE_BRANCH_COND)
                mask |= X86_BR_JCC;

        if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
                if (!x86_pmu_has_lbr_callstack())
                        return -EOPNOTSUPP;
                if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
                        return -EINVAL;
                mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
                        X86_BR_CALL_STACK;
        }

        if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
                mask |= X86_BR_IND_JMP;

        if (br_type & PERF_SAMPLE_BRANCH_CALL)
                mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
                mask |= X86_BR_TYPE_SAVE;

        /*
         * stash actual user request into reg; it may
         * be used by fixup code for some CPUs
         */
        event->hw.branch_reg.reg = mask;
        return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, v;
        int i;

        for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
                if (!(br_type & (1ULL << i)))
                        continue;

                v = x86_pmu.lbr_sel_map[i];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;

                if (v != LBR_IGN)
                        mask |= v;
        }

        reg = &event->hw.branch_reg;
        reg->idx = EXTRA_REG_LBR;

        /*
         * The first 9 bits (bits 0-8) of LBR_SELECT operate
         * in suppress mode. So LBR_SELECT should be set to
         * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK).
         * But the 10th bit LBR_CALL_STACK does not operate
         * in suppress mode.
         */
        reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);

        if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
            (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
            (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
                reg->config |= LBR_NO_INFO;

        return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
        int ret = 0;

        /*
         * no LBR on this PMU
         */
        if (!x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        /*
         * setup SW LBR filter
         */
        ret = intel_pmu_setup_sw_lbr_filter(event);
        if (ret)
                return ret;

        /*
         * setup HW LBR filter, if any
         */
        if (x86_pmu.lbr_sel_map)
                ret = intel_pmu_setup_hw_lbr_filter(event);

        return ret;
}

/*
 * return the type of control flow change at address "from";
 * the instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
        struct insn insn;
        void *addr;
        int bytes_read, bytes_left;
        int ret = X86_BR_NONE;
        int ext, to_plm, from_plm;
        u8 buf[MAX_INSN_SIZE];
        int is64 = 0;

        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
        from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

        /*
         * may be zero if the lbr did not fill up after a reset by the time
         * we get a PMU interrupt
         */
        if (from == 0 || to == 0)
                return X86_BR_NONE;

        if (abort)
                return X86_BR_ABORT | to_plm;

        if (from_plm == X86_BR_USER) {
                /*
                 * can happen if measuring at the user level only
                 * and we interrupt in a kernel thread, e.g., idle.
                 */
                if (!current->mm)
                        return X86_BR_NONE;

                /* may fail if text not present */
                bytes_left = copy_from_user_nmi(buf, (void __user *)from,
                                                MAX_INSN_SIZE);
                bytes_read = MAX_INSN_SIZE - bytes_left;
                if (!bytes_read)
                        return X86_BR_NONE;

                addr = buf;
        } else {
                /*
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
                 * Ensure we don't blindly read any address by validating it is
                 * a known text address.
                 */
                if (kernel_text_address(from)) {
                        addr = (void *)from;
                        /*
                         * Assume we can get the maximum possible size
                         * when grabbing kernel data. This is not
                         * _strictly_ true since we could possibly be
                         * executing up next to a memory hole, but
                         * it is very unlikely to be a problem.
                         */
                        bytes_read = MAX_INSN_SIZE;
                } else {
                        return X86_BR_NONE;
                }
        }

        /*
         * decoder needs to know the ABI especially
         * on 64-bit systems running 32-bit apps
         */
#ifdef CONFIG_X86_64
        is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
        insn_init(&insn, addr, bytes_read, is64);
        insn_get_opcode(&insn);
        if (!insn.opcode.got)
                return X86_BR_ABORT;

        switch (insn.opcode.bytes[0]) {
        case 0xf:
                switch (insn.opcode.bytes[1]) {
                case 0x05: /* syscall */
                case 0x34: /* sysenter */
                        ret = X86_BR_SYSCALL;
                        break;
                case 0x07: /* sysret */
                case 0x35: /* sysexit */
                        ret = X86_BR_SYSRET;
                        break;
                case 0x80 ... 0x8f: /* conditional */
                        ret = X86_BR_JCC;
                        break;
                default:
                        ret = X86_BR_NONE;
                }
                break;
        case 0x70 ... 0x7f: /* conditional */
                ret = X86_BR_JCC;
                break;
        case 0xc2: /* near ret */
        case 0xc3: /* near ret */
        case 0xca: /* far ret */
        case 0xcb: /* far ret */
                ret = X86_BR_RET;
                break;
        case 0xcf: /* iret */
                ret = X86_BR_IRET;
                break;
        case 0xcc ... 0xce: /* int */
                ret = X86_BR_INT;
                break;
        case 0xe8: /* call near rel */
                insn_get_immediate(&insn);
                if (insn.immediate1.value == 0) {
                        /* zero length call */
                        ret = X86_BR_ZERO_CALL;
                        break;
                }
                /* fall through */
        case 0x9a: /* call far absolute */
                ret = X86_BR_CALL;
                break;
        case 0xe0 ... 0xe3: /* loop jmp */
                ret = X86_BR_JCC;
                break;
        case 0xe9 ... 0xeb: /* jmp */
                ret = X86_BR_JMP;
                break;
        case 0xff: /* call near absolute, call far absolute ind */
                insn_get_modrm(&insn);
                ext = (insn.modrm.bytes[0] >> 3) & 0x7;
                switch (ext) {
                case 2: /* near ind call */
                case 3: /* far ind call */
                        ret = X86_BR_IND_CALL;
                        break;
                case 4:
                case 5:
                        ret = X86_BR_IND_JMP;
                        break;
                }
                break;
        default:
                ret = X86_BR_NONE;
        }
        /*
         * interrupts, traps, faults (and thus ring transition) may
         * occur on any instructions. Thus, to classify them correctly,
         * we need to first look at the from and to priv levels. If they
         * are different and to is in the kernel, then it indicates
         * a ring transition. If the from instruction is not a ring
         * transition instr (syscall, sysenter, int), then it means
         * it was an irq, trap or fault.
         *
         * we have no way of detecting kernel to kernel faults.
         */
        if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
            && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
                ret = X86_BR_IRQ;

        /*
         * branch priv level determined by target as
         * is done by HW when LBR_SELECT is implemented
         */
        if (ret != X86_BR_NONE)
                ret |= to_plm;

        return ret;
}

#define X86_BR_TYPE_MAP_MAX     16

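/* Indexed by the bit position of an X86_BR_* flag, after dropping the two priv bits. */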
static int branch_map[X86_BR_TYPE_MAP_MAX] = {
        PERF_BR_CALL,           /* X86_BR_CALL */
        PERF_BR_RET,            /* X86_BR_RET */
        PERF_BR_SYSCALL,        /* X86_BR_SYSCALL */
        PERF_BR_SYSRET,         /* X86_BR_SYSRET */
        PERF_BR_UNKNOWN,        /* X86_BR_INT */
        PERF_BR_UNKNOWN,        /* X86_BR_IRET */
        PERF_BR_COND,           /* X86_BR_JCC */
        PERF_BR_UNCOND,         /* X86_BR_JMP */
        PERF_BR_UNKNOWN,        /* X86_BR_IRQ */
        PERF_BR_IND_CALL,       /* X86_BR_IND_CALL */
        PERF_BR_UNKNOWN,        /* X86_BR_ABORT */
        PERF_BR_UNKNOWN,        /* X86_BR_IN_TX */
        PERF_BR_UNKNOWN,        /* X86_BR_NO_TX */
        PERF_BR_CALL,           /* X86_BR_ZERO_CALL */
        PERF_BR_UNKNOWN,        /* X86_BR_CALL_STACK */
        PERF_BR_IND,            /* X86_BR_IND_JMP */
};

static int
common_branch_type(int type)
{
        int i;

        type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

        if (type) {
                i = __ffs(type);
                if (i < X86_BR_TYPE_MAP_MAX)
                        return branch_map[i];
        }

        return PERF_BR_UNKNOWN;
}

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
        u64 from, to;
        int br_sel = cpuc->br_sel;
        int i, j, type;
        bool compress = false;

        /* if sampling all branches, then nothing to filter */
        if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
            ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
                return;

        for (i = 0; i < cpuc->lbr_stack.nr; i++) {

                from = cpuc->lbr_entries[i].from;
                to = cpuc->lbr_entries[i].to;

                type = branch_type(from, to, cpuc->lbr_entries[i].abort);
                if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
                        if (cpuc->lbr_entries[i].in_tx)
                                type |= X86_BR_IN_TX;
                        else
                                type |= X86_BR_NO_TX;
                }

                /* if type does not correspond, then discard */
                if (type == X86_BR_NONE || (br_sel & type) != type) {
                        cpuc->lbr_entries[i].from = 0;
                        compress = true;
                }

                if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
                        cpuc->lbr_entries[i].type = common_branch_type(type);
        }

        if (!compress)
                return;

        /* remove all entries with from=0 */
        for (i = 0; i < cpuc->lbr_stack.nr; ) {
                if (!cpuc->lbr_entries[i].from) {
                        j = i;
                        while (++j < cpuc->lbr_stack.nr)
                                cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
                        cpuc->lbr_stack.nr--;
                        if (!cpuc->lbr_entries[i].from)
                                continue;
                }
                i++;
        }
}
1118
Kan Liangc22497f2019-04-02 12:45:02 -07001119void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
1120{
1121 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1122 int i;
1123
1124 cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
Kan Liangdb278b92020-01-27 08:53:55 -08001125
1126 /* Cannot get TOS for large PEBS */
1127 if (cpuc->n_pebs == cpuc->n_large_pebs)
1128 cpuc->lbr_stack.hw_idx = -1ULL;
1129 else
1130 cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1131
Kan Liangc22497f2019-04-02 12:45:02 -07001132 for (i = 0; i < x86_pmu.lbr_nr; i++) {
1133 u64 info = lbr->lbr[i].info;
1134 struct perf_branch_entry *e = &cpuc->lbr_entries[i];
1135
1136 e->from = lbr->lbr[i].from;
1137 e->to = lbr->lbr[i].to;
1138 e->mispred = !!(info & LBR_INFO_MISPRED);
1139 e->predicted = !(info & LBR_INFO_MISPRED);
1140 e->in_tx = !!(info & LBR_INFO_IN_TX);
1141 e->abort = !!(info & LBR_INFO_ABORT);
1142 e->cycles = info & LBR_INFO_CYCLES;
1143 e->reserved = 0;
1144 }
1145 intel_pmu_lbr_filter(cpuc);
1146}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_REL_JMP
                                                | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
         */
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
         */
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
        x86_pmu.lbr_nr     = 4;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
        x86_pmu.lbr_nr     = 16;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - workaround LBR_SEL errata (see above)
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out
         */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out
         */
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

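        /*
         * The quirk applies when the LBR format carries TSX flags but the
         * CPU has no TSX support enabled (possible on Haswell parts).
         */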
        if (lbr_from_signext_quirk_needed())
                static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
        x86_pmu.lbr_nr   = 32;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out
         */
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
        /*
         * only models starting at stepping 10 seem
         * to have an operational LBR which can freeze
         * on PMU interrupt
         */
        if (boot_cpu_data.x86_model == 28
            && boot_cpu_data.x86_stepping < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }

        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
}

/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /* Knights Landing does have the MISPREDICT bit, despite reporting the LIP format */
        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
                x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}

/**
 * x86_perf_get_lbr - get the LBR records information
 *
 * @lbr: the caller's memory to store the LBR records information
 *
 * Returns: 0 indicates the LBR info has been successfully obtained
 */
int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
        int lbr_fmt = x86_pmu.intel_cap.lbr_format;

        lbr->nr = x86_pmu.lbr_nr;
        lbr->from = x86_pmu.lbr_from;
        lbr->to = x86_pmu.lbr_to;
        lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? MSR_LBR_INFO_0 : 0;

        return 0;
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);