/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_AX	0x08
#define UPROBE_FIX_RIP_CX	0x10

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/* Good-instruction tables for 64-bit apps */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  cc, cd - int3, int
 *  cf - iret
 *  d6 - illegal instruction
 *  f1 - int1/icebp
 *  f4 - hlt
 *  fa, fb - cli, sti
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *  67 - addr16 prefix
 *  ce - into
 *  f0 - lock prefix
 */

/*
 * TODO:
 *  - Where necessary, examine the modrm byte and allow only valid instructions
 *    in the different Groups and fpu instructions.
 */

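/*
 * Reject any instruction that carries a legacy segment-override (es/cs/ss/ds)
 * or LOCK prefix; uprobe_init_insn() refuses to probe such instructions.
 */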
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_DS   */
		case 0x3E:	/* INAT_PFX_SS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}

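/*
 * Decode auprobe->insn and check it against the good-instruction bitmaps.
 * Returns 0 if the instruction can be probed, -ENOTSUPP if it is not
 * supported, or -ENOEXEC if the decoder failed to process it.
 */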
static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * def->fixups accordingly. (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward).
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte with bit layout "00 reg 101".
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 *  - REX.B=1 bit in REX prefix, which normally extends r/m field,
 *    has no effect on rip-relative mode. It doesn't make modrm byte
 *    with r/m=101 refer to register 1101 = R13.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}

	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Convert from rip-relative addressing
	 * to register-relative addressing via a scratch register.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->def.fixups |= UPROBE_FIX_RIP_CX;
		/*
		 * Change modrm from "00 000 101" to "10 000 001". Example:
		 * 89 05 disp32  mov %eax,disp32(%rip) becomes
		 * 89 81 disp32  mov %eax,disp32(%rcx)
		 */
		*cursor = 0x81;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->def.fixups |= UPROBE_FIX_RIP_AX;
		/*
		 * Change modrm from "00 reg 101" to "10 reg 000". Example:
		 * 89 1d disp32  mov %edx,disp32(%rip) becomes
		 * 89 98 disp32  mov %edx,disp32(%rax)
		 */
		*cursor = (reg << 3) | 0x80;
	}
}

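/* Return the register that riprel_analyze() picked as the scratch register. */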
static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? &regs->ax : &regs->cx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->def.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

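/* Size of a stack slot / return address for the current task's ABI. */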
static inline int sizeof_long(void)
{
	return is_ia32_task() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

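/*
 * Emulate the push of a return address onto the user stack;
 * returns -EFAULT if the new stack slot cannot be written.
 */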
static int push_ret_address(struct pt_regs *regs, unsigned long ip)
{
	unsigned long new_sp = regs->sp - sizeof_long();

	if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}

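/*
 * Fix up ->ip/->sp after the copied instruction was single-stepped:
 * apply UPROBE_FIX_IP or UPROBE_FIX_CALL, and record UPROBE_FIX_SETF
 * so that the caller leaves TF alone after "popf".
 */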
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->def.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long();
		if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->def.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

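/*
 * Condition table for conditional-jump opcodes: COND() expands to a pair
 * of case labels (condition true / condition false), and each user of
 * CASE_COND defines DO() to act on the evaluated condition.
 */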
#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

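/*
 * Evaluate the condition of a conditional jmp against regs->flags;
 * anything that is not a conditional jmp is always "taken".
 */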
static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line. In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (push_ret_address(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails. We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed. So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns, reject any branch insns with such prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate def->fixups accordingly. To start with, ->fixups is
	 * either zero or it reflects rip-related fixups.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->def.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->def.ilen = insn.length;
	auprobe->def.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address. It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_AX or FIX_RIP_CX).
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

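/* Try to emulate the probed instruction instead of single-stepping it. */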
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

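/*
 * Replace the return address on the user stack with trampoline_vaddr and
 * return the original return address, or -1 if it can't be read or replaced.
 */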
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);

		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}