blob: bb7e1132290b0073b0ff3b0ac68d69d48016d2a8 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Jiri Olsac5e63192012-08-07 15:20:36 +02002#include <linux/errno.h>
3#include <linux/kernel.h>
Jiri Olsa40189942012-08-07 15:20:37 +02004#include <linux/sched.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +01005#include <linux/sched/task_stack.h>
Jiri Olsa40189942012-08-07 15:20:37 +02006#include <linux/perf_event.h>
Jiri Olsac5e63192012-08-07 15:20:36 +02007#include <linux/bug.h>
8#include <linux/stddef.h>
9#include <asm/perf_regs.h>
10#include <asm/ptrace.h>
11
/* Number of pt_regs-backed sample register indices for this build's bitness. */
#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

/* Designated-initializer helper: byte offset of register @r in struct pt_regs, keyed by sample index @id. */
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
19
20static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
21 PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
22 PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
23 PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
24 PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
25 PT_REGS_OFFSET(PERF_REG_X86_SI, si),
26 PT_REGS_OFFSET(PERF_REG_X86_DI, di),
27 PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
28 PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
29 PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
30 PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
31 PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
32 PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
33#ifdef CONFIG_X86_32
34 PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
35 PT_REGS_OFFSET(PERF_REG_X86_ES, es),
36 PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
37 PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
38#else
39 /*
40 * The pt_regs struct does not store
41 * ds, es, fs, gs in 64 bit mode.
42 */
43 (unsigned int) -1,
44 (unsigned int) -1,
45 (unsigned int) -1,
46 (unsigned int) -1,
47#endif
48#ifdef CONFIG_X86_64
49 PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
50 PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
51 PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
52 PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
53 PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
54 PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
55 PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
56 PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
57#endif
58};
59
/*
 * perf_reg_value - fetch the value of one sampled register.
 * @regs: pt_regs captured at sample time; for XMM indices this must be
 *        the ->regs member embedded in a struct x86_perf_regs.
 * @idx:  PERF_REG_X86_* index requested by the sampler.
 *
 * Returns the register value, or 0 when it is unavailable (XMM regs not
 * collected by the PMU, or @idx out of range).
 */
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	struct x86_perf_regs *perf_regs;

	if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
		/*
		 * XMM registers live outside pt_regs: recover the
		 * containing x86_perf_regs to reach them. xmm_regs is
		 * NULL when the XMM state was not collected.
		 */
		perf_regs = container_of(regs, struct x86_perf_regs, regs);
		if (!perf_regs->xmm_regs)
			return 0;
		return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
	}

	/* Guard the table lookup; a bad index is a caller bug, warn once. */
	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	/*
	 * NOTE(review): on 64-bit the ds/es/fs/gs slots hold
	 * (unsigned int)-1; regs_get_register() is expected to reject
	 * such out-of-range offsets and return 0 — confirm in
	 * asm/ptrace.h.
	 */
	return regs_get_register(regs, pt_regs_offset[idx]);
}
76
/*
 * Bits from PERF_REG_X86_MAX up to (but not including) PERF_REG_X86_XMM0
 * are reserved: below-XMM0 bits minus the valid pt_regs-backed ones.
 */
#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
				 ~((1ULL << PERF_REG_X86_MAX) - 1))
79
Jiri Olsac5e63192012-08-07 15:20:36 +020080#ifdef CONFIG_X86_32
/* R8..R15 exist only in 64-bit mode; they cannot be sampled on a 32-bit kernel. */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
		       (1ULL << PERF_REG_X86_R9) | \
		       (1ULL << PERF_REG_X86_R10) | \
		       (1ULL << PERF_REG_X86_R11) | \
		       (1ULL << PERF_REG_X86_R12) | \
		       (1ULL << PERF_REG_X86_R13) | \
		       (1ULL << PERF_REG_X86_R14) | \
		       (1ULL << PERF_REG_X86_R15))
89
Jiri Olsac5e63192012-08-07 15:20:36 +020090int perf_reg_validate(u64 mask)
91{
Kan Liang90d424912019-05-28 15:08:31 -070092 if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
Jiri Olsac5e63192012-08-07 15:20:36 +020093 return -EINVAL;
94
95 return 0;
96}
Jiri Olsa40189942012-08-07 15:20:37 +020097
/*
 * perf_reg_abi - report the register-sampling ABI used by @task.
 * On a 32-bit kernel every task gets the 32-bit register set.
 */
u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}
Andy Lutomirski88a7c262015-01-04 10:36:19 -0800102
/*
 * perf_get_regs_user - point a sample at the current task's user registers.
 * @regs_user:      output: ABI tag and the pt_regs to report
 * @regs:           regs at sample time (unused here)
 * @regs_user_copy: scratch copy buffer (unused on 32-bit)
 *
 * On 32-bit we hand out task_pt_regs(current) directly — presumably the
 * entry path saves the full frame, so no partial copy is needed; verify
 * against the 32-bit entry code.
 */
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
Jiri Olsac5e63192012-08-07 15:20:36 +0200110#else /* CONFIG_X86_64 */
/* pt_regs does not store ds/es/fs/gs in 64-bit mode; refuse to sample them. */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))
115
116int perf_reg_validate(u64 mask)
117{
Kan Liang90d424912019-05-28 15:08:31 -0700118 if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
Jiri Olsac5e63192012-08-07 15:20:36 +0200119 return -EINVAL;
120
121 return 0;
122}
Jiri Olsa40189942012-08-07 15:20:37 +0200123
124u64 perf_reg_abi(struct task_struct *task)
125{
126 if (test_tsk_thread_flag(task, TIF_IA32))
127 return PERF_SAMPLE_REGS_ABI_32;
128 else
129 return PERF_SAMPLE_REGS_ABI_64;
130}
Andy Lutomirski88a7c262015-01-04 10:36:19 -0800131
/*
 * perf_get_regs_user - snapshot the current task's user registers for a sample.
 * @regs_user:      output: ABI tag and pointer to the regs to report
 * @regs:           regs at sample time (may be mid-entry kernel state)
 * @regs_user_copy: scratch pt_regs we fill in and point @regs_user at
 *
 * Runs in NMI context (see comments below), so only fields of
 * task_pt_regs(current) that the entry path reliably saves are copied;
 * everything else is reported as -1 (unavailable). On failure to find a
 * coherent frame, abi is set to PERF_SAMPLE_REGS_ABI_NONE and regs to NULL.
 */
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	struct pt_regs *user_regs = task_pt_regs(current);

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all. This check isn't really
	 * sufficient, though, as we could be in an NMI inside an interrupt
	 * that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * These registers are always saved on 64-bit syscall entry.
	 * On 32-bit entry points, they are saved too except r8..r11.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->ax = user_regs->ax;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;
	/*
	 * Store user space frame-pointer value on sample
	 * to facilitate stack unwinding for cases when
	 * user space executable code has such support
	 * enabled at compile time:
	 */
	regs_user_copy->bp = user_regs->bp;

	/*
	 * bx and r12..r15 are reported as -1 (unavailable) — presumably
	 * they are not reliably saved by the syscall fast path.
	 * NOTE(review): confirm against the entry code.
	 */
	regs_user_copy->bx = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;
	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI.  Be careful: we're in NMI context, and we're
	 * considering current to be the current task, so we should
	 * be careful not to look at any other percpu variables that might
	 * change during context switches.
	 */
	regs_user->abi = user_64bit_mode(user_regs) ?
		PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

	regs_user->regs = regs_user_copy;
}
Jiri Olsac5e63192012-08-07 15:20:36 +0200195#endif /* CONFIG_X86_32 */