// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

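/*
 * Illustration (not part of the original source): each W() row pastes its
 * 16 availability flags with ##UL, shifts each flag to its bit position,
 * and shifts the 16-bit group to bits (row % 32)..(row % 32)+15; two rows
 * are then OR'ed into each u32 of a table.  The lookup that test_bit()
 * performs over such a table is, in effect:
 *
 *	u32 word = good_insns_32[opcode / 32];
 *	bool ok  = (word >> (opcode % 32)) & 1;
 *
 * e.g. opcode 0x17 (pop %ss) is word 0, bit 23, which the 32-bit table
 * below leaves 0.
 */
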
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
 * (so why do we support bound (62)? it's similar, and similarly unused...)
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * 07,17,1f - pop es/ss/ds
 * Normally not used in userspace, but would execute if used.
 * Can cause GP or stack exception if it tries to load a wrong segment descriptor.
 * We hesitate to run them under single step since kernel's handling
 * of userspace single-stepping (TF flag) is fragile.
 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
 * on the same grounds that they are never used.
 * cd - int N.
 * Used by userspace for "int 80" syscall entry. (Other "int N"
 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 * Not supported since kernel's handling of userspace single-stepping
 * (TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/* Good-instruction tables for 64-bit apps.
 *
 * Genuinely invalid opcodes:
 * 06,07 - formerly push/pop es
 * 0e - formerly push cs
 * 16,17 - formerly push/pop ss
 * 1e,1f - formerly push/pop ds
 * 27,2f,37,3f - formerly daa/das/aaa/aas
 * 60,61 - formerly pusha/popa
 * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
 * 82 - formerly redundant encoding of Group1
 * 9a - formerly call seg:ofs
 * ce - formerly into
 * d4,d5 - formerly aam/aad
 * d6 - formerly undocumented salc
 * ea - formerly jmp seg:ofs
 *
 * Opcodes we'll probably never support:
 * 6c-6f - ins,outs. SEGVs if used in userspace
 * e4-e7 - in,out imm. SEGVs if used in userspace
 * ec-ef - in,out acc. SEGVs if used in userspace
 * cc - int3. SIGTRAP if used in userspace
 * f1 - int1. SIGTRAP if used in userspace
 * f4 - hlt. SEGVs if used in userspace
 * fa - cli. SEGVs if used in userspace
 * fb - sti. SEGVs if used in userspace
 *
 * Opcodes which need some work to be supported:
 * cd - int N.
 * Used by userspace for "int 80" syscall entry. (Other "int N"
 * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
 * Not supported since kernel's handling of userspace single-stepping
 * (TF flag) is fragile.
 * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps.
 * Opcodes we don't support:
 * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
 * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
 *	   Also encodes tons of other system insns if mod=11.
 *	   Some are in fact non-system: xend, xtest, rdtscp, maybe more
 * 0f 05 - syscall
 * 0f 06 - clts (CPL0 insn)
 * 0f 07 - sysret
 * 0f 08 - invd (CPL0 insn)
 * 0f 09 - wbinvd (CPL0 insn)
 * 0f 0b - ud2
 * 0f 30 - wrmsr (CPL0 insn) (then why is rdmsr allowed? it's also a CPL0 insn)
 * 0f 34 - sysenter
 * 0f 35 - sysexit
 * 0f 37 - getsec
 * 0f 78 - vmread (Intel VMX. CPL0 insn)
 * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
 *	   Note: with prefixes, these two opcodes are
 *	   extrq/insertq/AVX512 convert vector ops.
 * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
 *	   {rd,wr}{fs,gs}base,{s,l,m}fence.
 *	   Why? They are all user-executable.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we may need to refine support for:
 *
 * 0f - 2-byte instructions: For many of these instructions, the validity
 * depends on the prefix and/or the reg field.  On such instructions, we
 * just consider the opcode combination valid if it corresponds to any
 * valid instruction.
 *
 * 8f - Group 1 - only reg = 0 is OK
 * c6-c7 - Group 11 - only reg = 0 is OK
 * d9-df - fpu insns with some illegal encodings
 * f2, f3 - repnz, repz prefixes.  These are also the first byte for
 * certain floating-point instructions, such as addsd.
 *
 * fe - Group 4 - only reg = 0 or 1 is OK
 * ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 * 0f - (floating-point?) prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 * but 64 and 65 (fs: and gs:) seem to be used, so we support them
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 * in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (!insn_complete(insn))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}
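
/*
 * Illustration (not part of the original source): a probed "int3"
 * (auprobe->insn[0] == 0xcc) decodes fine and has no bad prefix, but bit
 * 0xcc is clear in both good_insns tables and the insn has a one-byte
 * opcode, so -ENOTSUPP is returned; a plain "nop" (0x90) is accepted via
 * the one-byte table.
 */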
308
Srikar Dronamraju2b144492012-02-09 14:56:42 +0530309#ifdef CONFIG_X86_64
310/*
Srikar Dronamraju3ff54ef2012-02-22 14:46:02 +0530311 * If arch_uprobe->insn doesn't use rip-relative addressing, return
Srikar Dronamraju2b144492012-02-09 14:56:42 +0530312 * immediately. Otherwise, rewrite the instruction so that it accesses
313 * its memory operand indirectly through a scratch register. Set
Oleg Nesterov5cdb76d2014-06-01 21:13:46 +0200314 * defparam->fixups accordingly. (The contents of the scratch register
Denys Vlasenko50204c62014-05-01 16:52:46 +0200315 * will be saved before we single-step the modified instruction,
316 * and restored afterward).
Srikar Dronamraju2b144492012-02-09 14:56:42 +0530317 *
318 * We do this because a rip-relative instruction can access only a
319 * relatively small area (+/- 2 GB from the instruction), and the XOL
320 * area typically lies beyond that area. At least for instructions
321 * that store to memory, we can't execute the original instruction
322 * and "fix things up" later, because the misdirected store could be
323 * disastrous.
324 *
325 * Some useful facts about rip-relative instructions:
Ingo Molnar7b2d81d2012-02-17 09:27:41 +0100326 *
Denys Vlasenko50204c62014-05-01 16:52:46 +0200327 * - There's always a modrm byte with bit layout "00 reg 101".
Ingo Molnar7b2d81d2012-02-17 09:27:41 +0100328 * - There's never a SIB byte.
329 * - The displacement is always 4 bytes.
Denys Vlasenko50204c62014-05-01 16:52:46 +0200330 * - REX.B=1 bit in REX prefix, which normally extends r/m field,
331 * has no effect on rip-relative mode. It doesn't make modrm byte
332 * with r/m=101 refer to register 1101 = R13.
Srikar Dronamraju2b144492012-02-09 14:56:42 +0530333 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode low numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for VEX3/EVEX prefix.
	 * TODO: add XOP treatment when insn decoder supports them
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
		 * Setting VEX3.b (setting because it has inverted meaning).
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
		 * is the 4th bit of MODRM.rm, and needs the same treatment.
		 * For VEX3-encoded insns, VEX3.x value has no effect in
		 * non-SIB encoding, the change is superfluous but harmless.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative addressing
	 * via a scratch register.
	 *
	 * This is tricky since there are insns with modrm byte
	 * which also use registers not encoded in modrm byte:
	 * [i]div/[i]mul: implicitly use dx:ax
	 * shift ops: implicitly use cx
	 * cmpxchg: implicitly uses ax
	 * cmpxchg8/16b: implicitly uses dx:ax and bx:cx
	 *   Encoding: 0f c7/1 modrm
	 *   The code below thinks that reg=1 (cx), chooses si as scratch.
	 * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m.
	 *   First appeared in Haswell (BMI2 insn). It is vex-encoded.
	 *   Example where none of bx,cx,dx can be used as scratch reg:
	 *   c4 e2 63 f6 0d disp32   mulx disp32(%rip),%ebx,%ecx
	 * [v]pcmpistri: implicitly uses cx, xmm0
	 * [v]pcmpistrm: implicitly uses xmm0
	 * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0
	 * [v]pcmpestrm: implicitly uses ax, dx, xmm0
	 *   Evil SSE4.2 string comparison ops from hell.
	 * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination.
	 *   Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm.
	 *   Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi).
	 *   AMD says it has no 3-operand form (vex.vvvv must be 1111)
	 *   and that it can have only register operands, not mem
	 *   (its modrm byte must have mode=11).
	 *   If these restrictions are ever lifted,
	 *   we'll need code to prevent selection of di as scratch reg!
	 *
	 * Summary: I don't know of any insns with modrm byte which
	 * use SI register implicitly. DI register is used only
	 * by one insn (maskmovq) and BX register is used
	 * only by one too (cmpxchg8b).
	 * BP is stack-segment based (may be a problem?).
	 * AX, DX, CX are off-limits (many implicit users).
	 * SP is unusable (it's stack pointer - think about "pop mem";
	 * also, rsp+disp32 needs sib encoding -> insn length change).
	 */

	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.
	 * But in 32-bit mode, high-order bit may be ignored.
	 * Therefore, let's consider only 3 low-order bits.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 *
	 * Choose scratch reg. Order is important: must not select bx
	 * if we can use si (cmpxchg8b case!)
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2". Example:
	 * 89 05 disp32  mov %eax,disp32(%rip) becomes
	 * 89 86 disp32  mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}
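
/*
 * Worked example (illustration only, not from the original source):
 * probing "48 8b 05 d2 04 00 00" = mov 0x4d2(%rip),%rax.  The modrm byte
 * is 0x05 = "00 000 101", so reg = 0 (ax); there is no VEX prefix, so
 * reg2 also ends up 0.  Neither is 6, so %rsi is chosen as scratch
 * (UPROBE_FIX_RIP_SI) and modrm becomes 0x80|(0<<3)|6 = 0x86: the copy
 * executed out-of-line is "48 8b 86 d2 04 00 00" = mov 0x4d2(%rsi),%rax,
 * with %rsi pointed at the right base by riprel_pre_xol() below.
 */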

static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}
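
/*
 * Note (added for illustration): RIP-relative operands are computed
 * relative to the address of the *next* instruction, so the scratch
 * register is loaded with utask->vaddr + defparam.ilen - the value the
 * probed insn's %rip would have held in place - and disp32(%reg) then
 * hits exactly the bytes disp32(%rip) would have addressed.
 */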

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(void)
{
	return in_ia32_syscall() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long();

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}
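
/*
 * Illustration (not part of the original source): this mirrors what a
 * hardware "push" would do.  On 64-bit, with regs->sp == 0x7ffd1120 and
 * val == 0x4005d0, the 8 bytes at 0x7ffd1118 become 0x4005d0 and
 * regs->sp is updated to 0x7ffd1118; if the user stack page is not
 * writable, the copy fails and -EFAULT is returned with regs->sp intact.
 */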

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND
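
/*
 * Illustration (not in the original source) of the machinery above: with
 * DO(expr) defined as "return expr;", COND(74, 75, XF(ZF)) expands in
 * check_jmp_cond() to
 *
 *	case 0x74: return ((!!(flags & X86_EFLAGS_ZF))) != 0;	/. je ./
 *	case 0x75: return ((!!(flags & X86_EFLAGS_ZF))) == 0;	/. jne ./
 *
 * while the DO() used in is_cond_jmp_opcode() ignores the condition and
 * just turns every listed opcode into "return true;".
 */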

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line. In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
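
/*
 * Worked example (illustration only): a probed "75 10" (jne +0x10) at
 * vaddr 0x1000 has ilen = 2 and offs = 0x10.  new_ip becomes 0x1002; if
 * ZF is clear the jump is taken and regs->ip = 0x1012, otherwise offs is
 * zeroed and regs->ip = 0x1002, the fall-through address.  Either way
 * the insn never has to be single-stepped.
 */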

static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails. We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed. So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[]. We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns, reject any branch insns with such prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
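
/*
 * Illustration (not in the original source): for a near conditional jump
 * such as "0f 84 nn nn nn nn" (je rel32), OPCODE2() - 0x10 = 0x84 - 0x10
 * = 0x74, the short "je" opcode, so the same CASE_COND tables cover both
 * encodings; branch.offs simply holds the wider rel32 immediate.
 */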
758
Yonghong Songe7ed9d92017-11-30 16:12:02 -0800759/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
760static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
761{
762 u8 opc1 = OPCODE1(insn), reg_offset = 0;
763
764 if (opc1 < 0x50 || opc1 > 0x57)
765 return -ENOSYS;
766
767 if (insn->length > 2)
768 return -ENOSYS;
769 if (insn->length == 2) {
770 /* only support rex_prefix 0x41 (x64 only) */
771#ifdef CONFIG_X86_64
772 if (insn->rex_prefix.nbytes != 1 ||
773 insn->rex_prefix.bytes[0] != 0x41)
774 return -ENOSYS;
775
776 switch (opc1) {
777 case 0x50:
778 reg_offset = offsetof(struct pt_regs, r8);
779 break;
780 case 0x51:
781 reg_offset = offsetof(struct pt_regs, r9);
782 break;
783 case 0x52:
784 reg_offset = offsetof(struct pt_regs, r10);
785 break;
786 case 0x53:
787 reg_offset = offsetof(struct pt_regs, r11);
788 break;
789 case 0x54:
790 reg_offset = offsetof(struct pt_regs, r12);
791 break;
792 case 0x55:
793 reg_offset = offsetof(struct pt_regs, r13);
794 break;
795 case 0x56:
796 reg_offset = offsetof(struct pt_regs, r14);
797 break;
798 case 0x57:
799 reg_offset = offsetof(struct pt_regs, r15);
800 break;
801 }
802#else
803 return -ENOSYS;
804#endif
805 } else {
806 switch (opc1) {
807 case 0x50:
808 reg_offset = offsetof(struct pt_regs, ax);
809 break;
810 case 0x51:
811 reg_offset = offsetof(struct pt_regs, cx);
812 break;
813 case 0x52:
814 reg_offset = offsetof(struct pt_regs, dx);
815 break;
816 case 0x53:
817 reg_offset = offsetof(struct pt_regs, bx);
818 break;
819 case 0x54:
820 reg_offset = offsetof(struct pt_regs, sp);
821 break;
822 case 0x55:
823 reg_offset = offsetof(struct pt_regs, bp);
824 break;
825 case 0x56:
826 reg_offset = offsetof(struct pt_regs, si);
827 break;
828 case 0x57:
829 reg_offset = offsetof(struct pt_regs, di);
830 break;
831 }
832 }
833
834 auprobe->push.reg_offset = reg_offset;
835 auprobe->push.ilen = insn->length;
836 auprobe->ops = &push_xol_ops;
837 return 0;
838}
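
/*
 * Illustration (not in the original source): "41 57" (push %r15) decodes
 * with a one-byte REX prefix 0x41 and opc1 0x57, so reg_offset becomes
 * offsetof(struct pt_regs, r15); a plain "57" (push %rdi) takes the
 * non-REX branch and yields offsetof(struct pt_regs, di).
 */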

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}
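
/*
 * Illustration (not in the original source) of the dispatch above: a
 * probed "50" (push %rax) is claimed by push_setup_xol_ops() and fully
 * emulated; "eb 05" (jmp short) goes to branch_setup_xol_ops(); anything
 * else, e.g. "48 8b 05 ..." (mov disp32(%rip),%rax), falls through to
 * default_xol_ops and is single-stepped out-of-line with the fixups
 * computed here.
 */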

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}
930
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +0530931/*
932 * If xol insn itself traps and generates a signal(Say,
933 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
934 * instruction jumps back to its own address. It is assumed that anything
935 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
936 *
937 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
938 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
939 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
940 */
941bool arch_uprobe_xol_was_trapped(struct task_struct *t)
942{
943 if (t->thread.trap_nr != UPROBE_TRAP_NR)
944 return true;
945
946 return false;
947}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal. Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether the address has already been hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);

		force_sig(SIGSEGV);
	}

	return -1;
}
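
/*
 * Illustration (not in the original source): on function entry via
 * "call", regs->sp points at the saved return address, say 0x401234.
 * The code above reads it, writes trampoline_vaddr (the uretprobe
 * trampoline set up by the core uprobes code) in its place, and returns
 * 0x401234 so the core can restore it when the uretprobe fires; on a
 * partially failed copy_to_user() the stack is corrupt and the task is
 * killed with SIGSEGV.
 */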

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}