/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x1

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x2

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x4

#define UPROBE_FIX_RIP_AX	0x8000
#define UPROBE_FIX_RIP_CX	0x4000

#define	UPROBE_TRAP_NR		UINT_MAX

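/*
 * Note (added for illustration, not in the original source): these fixup
 * flags can combine.  For example, an indirect call through a rip-relative
 * memory operand ("call *foo(%rip)") ends up with UPROBE_FIX_CALL plus one
 * of the UPROBE_FIX_RIP_* bits, while a plain rip-relative mov gets
 * UPROBE_FIX_IP plus a UPROBE_FIX_RIP_* bit.  See arch_uprobe_analyze_insn()
 * and handle_riprel_insn() below.
 */
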
/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

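/*
 * Illustration (added for clarity, not in the original): each W() row packs
 * sixteen 0/1 flags into bits (row % 32) .. (row % 32) + 15 of one 32-bit
 * word, so OR-ing the 0x00 and 0x10 rows below yields word 0 of a table,
 * the 0x20/0x30 rows yield word 1, and so on.  Each table is then read as a
 * 256-bit bitmap whose bit number is simply the opcode byte, e.g.
 * test_bit(0x0f, (unsigned long *)good_insns_32) checks opcode 0x0f.
 */
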
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long *) is used.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/* Good-instruction tables for 64-bit apps */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  cc, cd - int3, int
 *  cf - iret
 *  d6 - illegal instruction
 *  f1 - int1/icebp
 *  f4 - hlt
 *  fa, fb - cli, sti
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *  67 - addr16 prefix
 *  ce - into
 *  f0 - lock prefix
 */

/*
 * TODO:
 *  - Where necessary, examine the modrm byte and allow only valid instructions
 *  in the different Groups and fpu instructions.
 */

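/*
 * Added note (not in the original): is_prefix_bad() enforces part of the
 * policy described above -- instructions carrying the legacy es/cs/ss/ds
 * segment-override prefixes or the LOCK prefix are rejected outright rather
 * than single-stepped out of line.
 */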
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_SS   */
		case 0x3E:	/* INAT_PFX_DS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
 * accordingly.  (The contents of the scratch register will be saved
 * before we single-step the modified instruction, and restored
 * afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}

	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->fixups = UPROBE_FIX_RIP_CX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*cursor = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->fixups = UPROBE_FIX_RIP_AX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*cursor = (reg << 3);
	}

	/* Target address = address of next instruction + (signed) offset */
	auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;

	/* Displacement field is gone; slide immediate field (if any) over. */
	if (insn->immediate.nbytes) {
		cursor++;
		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
	}
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
		autask->saved_scratch_register = regs->ax;
		regs->ax = current->utask->vaddr;
		regs->ax += auprobe->rip_rela_target_address;
	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
		autask->saved_scratch_register = regs->cx;
		regs->cx = current->utask->vaddr;
		regs->cx += auprobe->rip_rela_target_address;
	}
}

static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
	if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct arch_uprobe_task *autask;

		autask = &current->utask->autask;
		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
			regs->ax = autask->saved_scratch_register;
		else
			regs->cx = autask->saved_scratch_register;

		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Caller may need to apply other fixups to handle stuff
		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
		 */
		if (correction)
			*correction += 4;
	}
}
#else /* 32-bit: */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
					long *correction)
{
}
#endif /* CONFIG_X86_64 */

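/*
 * Descriptive note (added; not in the original): per-instruction-type
 * callbacks.  ->emulate() may handle the probed instruction entirely in the
 * kernel, so no out-of-line step is needed; ->pre_xol()/->post_xol() run
 * before and after the single-step on the XOL copy.  Any callback may be
 * NULL; the callers below check for that.
 */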
struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
};

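/*
 * Added note: a "long" here means the size of a user-space stack slot or
 * return address -- 4 bytes for a 32-bit task, 8 bytes for a 64-bit task.
 */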
static inline int sizeof_long(void)
{
	return is_ia32_task() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
	return 0;
}

/*
 * Adjust the return address pushed by a call insn executed out of line.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
	int rasize = sizeof_long();
	long ra;

	if (copy_from_user(&ra, (void __user *)sp, rasize))
		return -EFAULT;

	ra += correction;
	if (copy_to_user((void __user *)sp, &ra, rasize))
		return -EFAULT;

	return 0;
}

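/*
 * Added note with a small worked example (not in the original): the probed
 * instruction lives at utask->vaddr but was stepped at utask->xol_vaddr, so
 * any ip-relative value produced by the step is off by (xol_vaddr - vaddr).
 * E.g. if vaddr is 0x400100 and xol_vaddr is 0x7f001000, correction is
 * 0x400100 - 0x7f001000; adding it to the post-step ip (or to the return
 * address a call just pushed) points execution back at the original code.
 */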
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	long correction = (long)(utask->vaddr - utask->xol_vaddr);

	handle_riprel_post_xol(auprobe, regs, &correction);
	if (auprobe->fixups & UPROBE_FIX_IP)
		regs->ip += correction;

	if (auprobe->fixups & UPROBE_FIX_CALL) {
		if (adjust_ret_addr(regs->sp, correction)) {
			regs->sp += sizeof_long();
			return -ERESTART;
		}
	}

	return 0;
}

static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

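/*
 * Expansion example (added for clarity): with DO(expr) defined as
 * "return expr;", the COND(74, 75, XF(ZF)) entry expands to
 *
 *	case 0x74: return (!!(flags & X86_EFLAGS_ZF)) != 0;
 *	case 0x75: return (!!(flags & X86_EFLAGS_ZF)) == 0;
 *
 * i.e. "je" is taken when ZF is set and "jne" when it is clear, which is
 * exactly what check_jmp_cond() below needs.
 */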
static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		unsigned long new_sp = regs->sp - sizeof_long();
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long()))
			return false;
		regs->sp = new_sp;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
	 * prefix.  No one uses these insns; reject any branch insn with such
	 * a prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	bool fix_ip = true, fix_call = false;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups arch_uprobe_post_xol() will need to perform,
	 * and annotate arch_uprobe->fixups accordingly.  To start with, ->fixups
	 * is either zero or it reflects rip-related fixups.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
		fix_ip = false;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_call = true;
		fix_ip = false;
		break;
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip = false;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_call = true;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip = false;
		}
		/* fall through */
	default:
		handle_riprel_insn(auprobe, &insn);
	}

	if (fix_ip)
		auprobe->fixups |= UPROBE_FIX_IP;
	if (fix_call)
		auprobe->fixups |= UPROBE_FIX_CALL;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	if (auprobe->ops->post_xol) {
		int err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			arch_uprobe_abort_xol(auprobe, regs);
			/*
			 * Restart the probed insn. ->post_xol() must ensure
			 * this is really possible if it returns -ERESTART.
			 */
			if (err == -ERESTART)
				return 0;
			return err;
		}
	}

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (utask->autask.saved_tf)
		send_sig(SIGTRAP, current, 0);
	else if (!(auprobe->fixups & UPROBE_FIX_SETF))
		regs->flags &= ~X86_EFLAGS_TF;

	return 0;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, or if arch_uprobe_post_xol() failed.
 * Reset the instruction pointer to its probed address for the potential
 * restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	handle_riprel_post_xol(auprobe, regs, NULL);
	instruction_pointer_set(regs, utask->vaddr);

	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

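/*
 * Added note: the "skip sstep" path below lets simple instructions
 * (currently the branches handled by branch_xol_ops) be emulated directly
 * from the breakpoint handler, so no XOL slot and no single-step trap are
 * needed for them.
 */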
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

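/*
 * Added note: for uretprobes, the return address sitting at the top of the
 * user stack (regs->sp) is replaced with trampoline_vaddr; the original
 * return address is returned to the generic uprobes code so it can be
 * restored when the trampoline is hit.
 */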
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);

		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}