/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>

/* Post-execution fixups. */

/* No fixup needed */
#define UPROBE_FIX_NONE		0x0

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x1

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x2

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x4

#define UPROBE_FIX_RIP_AX	0x8000
#define UPROBE_FIX_RIP_CX	0x4000

#define UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

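/*
 * A worked example of how W() packs the tables below (illustrative note,
 * not in the original source): each W() row packs 16 one-bit flags, and
 * two adjacent rows OR'ed together fill one u32 of the table.  So for
 * 0xc3 ("ret"), test_bit(0xc3, (unsigned long *)good_insns_32) reads
 * bit 0xc3 % 32 = 3 of word 0xc3 / 32 = 6, which the W(0xc0, ...) row
 * sets to 1.
 */
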
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};

/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};

#ifdef CONFIG_X86_64
/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#endif
#undef W

/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  cc, cd - int3, int
 *  cf - iret
 *  d6 - illegal instruction
 *  f1 - int1/icebp
 *  f4 - hlt
 *  fa, fb - cli, sti
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *  67 - addr16 prefix
 *  ce - into
 *  f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *   in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_DS   */
		case 0x3E:	/* INAT_PFX_SS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}

static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
{
	insn_init(insn, auprobe->insn, false);

	/* Skip good instruction prefixes; reject "bad" ones. */
	insn_get_opcode(insn);
	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
 * accordingly.  (The contents of the scratch register will be saved
 * before we single-step the modified instruction, and restored
 * afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
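/*
 * Worked example (illustrative bytes, not taken from this file): the insn
 * "mov %eax,0x100(%rip)" encodes as 89 05 00 01 00 00, where modrm 05 is
 * mod=00, reg=000, rm=101 (%rip).  Since reg == 0, the rewrite below picks
 * %rcx as the scratch register and turns modrm into 01 (rm=001), leaving
 * 89 01, i.e. "mov %eax,(%rcx)", with the squeezed-out displacement
 * remembered in ->rip_rela_target_address.
 */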
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}

	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	insn_get_length(insn);

	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->fixups = UPROBE_FIX_RIP_CX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*cursor = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->fixups = UPROBE_FIX_RIP_AX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*cursor = (reg << 3);
	}

	/* Target address = address of next instruction + (signed) offset */
	auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;

	/* Displacement field is gone; slide immediate field (if any) over. */
	if (insn->immediate.nbytes) {
		cursor++;
		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
	}
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
		autask->saved_scratch_register = regs->ax;
		regs->ax = current->utask->vaddr;
		regs->ax += auprobe->rip_rela_target_address;
	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
		autask->saved_scratch_register = regs->cx;
		regs->cx = current->utask->vaddr;
		regs->cx += auprobe->rip_rela_target_address;
	}
}
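
/*
 * Continuing the illustrative example above: for the 6-byte insn
 * "mov %eax,0x100(%rip)" probed at vaddr, rip_rela_target_address is
 * 6 + 0x100, so the scratch register is loaded with vaddr + 6 + 0x100 --
 * exactly the address the original rip-relative insn would have written.
 */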

static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
	if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct arch_uprobe_task *autask;

		autask = &current->utask->autask;
		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
			regs->ax = autask->saved_scratch_register;
		else
			regs->cx = autask->saved_scratch_register;

		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Caller may need to apply other fixups to handle stuff
		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
		 */
		if (correction)
			*correction += 4;
	}
}

static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
{
	insn_init(insn, auprobe->insn, true);

	/* Skip good instruction prefixes; reject "bad" ones. */
	insn_get_opcode(insn);
	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}
	return -ENOTSUPP;
}

static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
	if (mm->context.ia32_compat)
		return validate_insn_32bits(auprobe, insn);
	return validate_insn_64bits(auprobe, insn);
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
					long *correction)
{
}

static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
	return validate_insn_32bits(auprobe, insn);
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
};

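/*
 * Size of a long -- and thus of a saved return address on the user
 * stack -- in the probed task: 4 bytes for an ia32 task, 8 otherwise.
 */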
static inline int sizeof_long(void)
{
	return is_ia32_task() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
	return 0;
}

/*
 * Adjust the return address pushed by a call insn executed out of line.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
	int rasize = sizeof_long();
	long ra;

	if (copy_from_user(&ra, (void __user *)sp, rasize))
		return -EFAULT;

	ra += correction;
	if (copy_to_user((void __user *)sp, &ra, rasize))
		return -EFAULT;

	return 0;
}
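
/*
 * Illustrative use (made-up addresses): after a copied non-rip-relative
 * "call" runs in an XOL slot at 0x7fff0000, the pushed return address is
 * 0x7fff0000 + insn length; adding correction = vaddr - 0x7fff0000
 * rewrites it to vaddr + insn length, the insn following the original
 * call.
 */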

static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	long correction = (long)(utask->vaddr - utask->xol_vaddr);

	handle_riprel_post_xol(auprobe, regs, &correction);
	if (auprobe->fixups & UPROBE_FIX_IP)
		regs->ip += correction;

	if (auprobe->fixups & UPROBE_FIX_CALL) {
		if (adjust_ret_addr(regs->sp, correction)) {
			regs->sp += sizeof_long();
			return -ERESTART;
		}
	}

	return 0;
}

static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef XF
#undef COND
#undef CASE_COND
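
/*
 * For reference, the expansion the macros above produce (shown as an
 * illustration, not extra code): in check_jmp_cond(), COND(74, 75, XF(ZF))
 * becomes
 *	case 0x74: return ((!!(flags & X86_EFLAGS_ZF)) != 0);
 *	case 0x75: return ((!!(flags & X86_EFLAGS_ZF)) == 0);
 * i.e. "jz" is taken iff ZF is set, "jnz" iff it is clear.
 */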

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		unsigned long new_sp = regs->sp - sizeof_long();
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; either way, arch_uprobe_post_xol() won't even be
		 * called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long()))
			return false;
		regs->sp = new_sp;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}
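
/*
 * Concretely (illustrative bytes): a 5-byte "call" e8 11 22 33 44 becomes
 * e8 00 00 00 00 after the memset above -- a call whose target is the
 * very next insn, which is exactly "call 1f; 1:".
 */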

static struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);

	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
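
/*
 * Net effect (assuming the decode above succeeds): once ->ops points at
 * branch_xol_ops, arch_uprobe_skip_sstep() can emulate e.g. "je" (opcode
 * 0x74) entirely in the kernel via branch_emulate_op(), so the probed
 * task normally never enters the XOL slot for these insns.
 */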

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a negative errno on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	bool fix_ip = true, fix_call = false;
	int ret;

	ret = validate_insn_bits(auprobe, mm, &insn);
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups arch_uprobe_post_xol() will need to perform,
	 * and annotate arch_uprobe->fixups accordingly.  To start with, ->fixups
	 * is either zero or it reflects rip-related fixups.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
		fix_ip = false;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_call = true;
		fix_ip = false;
		break;
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip = false;
		break;
	case 0xff:
		insn_get_modrm(&insn);
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_call = true;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip = false;
		}
		/* fall through */
	default:
		handle_riprel_insn(auprobe, &insn);
	}

	if (fix_ip)
		auprobe->fixups |= UPROBE_FIX_IP;
	if (fix_call)
		auprobe->fixups |= UPROBE_FIX_CALL;

	auprobe->ops = &default_xol_ops;
	return 0;
}
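
/*
 * Two illustrative outcomes of the analysis above: a short "jmp" (opcode
 * 0xeb) is claimed by branch_setup_xol_ops() and will be emulated, while
 * "push %rax" (0x50) falls through to the default path with UPROBE_FIX_IP
 * set, so it is single-stepped out of line and its ip corrected afterward.
 */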

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	if (auprobe->ops->pre_xol)
		return auprobe->ops->pre_xol(auprobe, regs);
	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
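/*
 * Worked numbers for the FIX_IP case (addresses made up): a 3-byte insn
 * probed at 0x400100 is stepped in an XOL slot at 0x7fff0000, leaving
 * ip = 0x7fff0003.  correction = 0x400100 - 0x7fff0000, so FIX_IP yields
 * ip = 0x400103, the insn following the original.
 */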
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	if (auprobe->ops->post_xol) {
		int err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			arch_uprobe_abort_xol(auprobe, regs);
			/*
			 * Restart the probed insn.  ->post_xol() must ensure
			 * this is really possible if it returns -ERESTART.
			 */
			if (err == -ERESTART)
				return 0;
			return err;
		}
	}

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF.  We need
	 * to examine the opcode to make it right.
	 */
	if (utask->autask.saved_tf)
		send_sig(SIGTRAP, current, 0);
	else if (!(auprobe->fixups & UPROBE_FIX_SETF))
		regs->flags &= ~X86_EFLAGS_TF;

	return 0;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, or if arch_uprobe_post_xol() failed.
 * Reset the instruction pointer to its probed address for the potential
 * restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	handle_riprel_post_xol(auprobe, regs, NULL);
	instruction_pointer_set(regs, utask->vaddr);

	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);

		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}