// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

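/*
 * Map each PERF_REG_X86_* sample register index to the byte offset of
 * the corresponding field in struct pt_regs.
 */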
static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
	PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
	PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
	PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
	PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
	PT_REGS_OFFSET(PERF_REG_X86_SI, si),
	PT_REGS_OFFSET(PERF_REG_X86_DI, di),
	PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
	PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
	PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
	PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
	PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
	PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
	PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
	PT_REGS_OFFSET(PERF_REG_X86_ES, es),
	PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
	PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
	/*
	 * The pt_regs struct does not store ds, es, fs and gs in
	 * 64-bit mode, so mark their offsets as invalid.
	 */
	[PERF_REG_X86_DS] = (unsigned int) -1,
	[PERF_REG_X86_ES] = (unsigned int) -1,
	[PERF_REG_X86_FS] = (unsigned int) -1,
	[PERF_REG_X86_GS] = (unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
	PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
	PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
	PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
	PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
	PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
	PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
	PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
	PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};

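/*
 * Fetch the value of one sample register. XMM registers live outside
 * struct pt_regs and are read from the x86_perf_regs wrapper that
 * embeds it; all other registers are read via their pt_regs_offset[]
 * entry.
 */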
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	struct x86_perf_regs *perf_regs;

	if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
		perf_regs = container_of(regs, struct x86_perf_regs, regs);
		if (!perf_regs->xmm_regs)
			return 0;
		return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
	}

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}

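/*
 * Mask of the bits between PERF_REG_X86_MAX and PERF_REG_X86_XMM0,
 * which correspond to no register at all.
 */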
#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
				 ~((1ULL << PERF_REG_X86_MAX) - 1))

#ifdef CONFIG_X86_32
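/* r8..r15 do not exist on 32-bit x86. */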
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
		       (1ULL << PERF_REG_X86_R9) | \
		       (1ULL << PERF_REG_X86_R10) | \
		       (1ULL << PERF_REG_X86_R11) | \
		       (1ULL << PERF_REG_X86_R12) | \
		       (1ULL << PERF_REG_X86_R13) | \
		       (1ULL << PERF_REG_X86_R14) | \
		       (1ULL << PERF_REG_X86_R15))

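/*
 * A sample register mask is valid only if it is non-zero and requests
 * neither unsupported nor reserved registers.
 */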
int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

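/*
 * On 32-bit, the full user register state is saved in pt_regs on every
 * entry from user mode, so the task's pt_regs can be reported directly.
 */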
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
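/* The 64-bit pt_regs does not store ds, es, fs and gs. */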
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}

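/* Report the 32-bit ABI for tasks running in IA-32 compatibility mode. */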
u64 perf_reg_abi(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_IA32))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}

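/*
 * Samples can fire in NMI context, where the saved user register state
 * may be incomplete. Assemble a best-effort copy in regs_user_copy and
 * mark anything unreliable as invalid.
 */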
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	struct pt_regs *user_regs = task_pt_regs(current);

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all. This check isn't really
	 * sufficient, though, as we could be in an NMI inside an
	 * interrupt that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * These registers are always saved on 64-bit syscall entry.
	 * On 32-bit entry points, all of them except r8..r11 are
	 * saved as well.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->ax = user_regs->ax;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;
	/*
	 * Store the user-space frame-pointer value on sample, to
	 * facilitate stack unwinding when the user-space executable
	 * was built with frame-pointer support enabled at compile
	 * time:
	 */
	regs_user_copy->bp = user_regs->bp;

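	/*
	 * bx and r12..r15 are callee-saved and not saved on all kernel
	 * entry paths, so don't report them.
	 */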
	regs_user_copy->bx = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;
	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI. Be careful: we're in NMI context and we're relying
	 * on current being the current task, so we must not look at
	 * any other per-CPU variables that might change during a
	 * context switch.
	 */
	regs_user->abi = user_64bit_mode(user_regs) ?
		PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

	regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */