/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
	free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};
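
/*
 * A note on the cache above (an inference from the code, not from the
 * original comments): on s390 the DMA zone covers roughly the first 2GB,
 * so pages allocated with GFP_DMA lie in the same 2GB area as the kernel
 * image. s390_get_insn_slot() below relies on this when it hands out
 * slots for probes on kernel-image addresses.
 */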

static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
	if (!is_known_insn((unsigned char *)insn))
		return -EINVAL;
	switch (insn[0] >> 8) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag	 */
	case 0x44:	/* ex	 */
	case 0xac:	/* stnsm */
	case 0xad:	/* stosm */
		return -EINVAL;
	case 0xc6:
		switch (insn[0] & 0x0f) {
		case 0x00:	/* exrl	 */
			return -EINVAL;
		}
	}
	switch (insn[0]) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr	 */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
	case 0xb98d:	/* epsw	 */
		return -EINVAL;
	}
	return 0;
}

static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
	/* default fixup method */
	int fixup = FIXUP_PSW_NORMAL;

	switch (insn[0] >> 8) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr	*/
		fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((insn[0] & 0x0f) == 0)
			fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if ((insn[0] & 0xff) == 0xb2)
			fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((insn[0] & 0x0f) == 0x05)
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((insn[0] & 0x0f) == 0x05)	/* brasl */
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		switch (insn[2] & 0xff) {
		case 0x44: /* bxhg  */
		case 0x45: /* bxleg */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	case 0xe3:	/* bctg	*/
		if ((insn[2] & 0xff) == 0x46)
			fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xec:
		switch (insn[2] & 0xff) {
		case 0xe5: /* clgrb */
		case 0xe6: /* cgrb  */
		case 0xf6: /* crb   */
		case 0xf7: /* clrb  */
		case 0xfc: /* cgib  */
		case 0xfd: /* cglib */
		case 0xfe: /* cib   */
		case 0xff: /* clib  */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	}
	return fixup;
}
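
/*
 * Illustrative example: "basr %r14,%r0" encodes as 0x0de0. The first
 * byte 0x0d yields FIXUP_RETURN_REGISTER, and because the r2 field is
 * zero no branch is taken, so FIXUP_BRANCH_NOT_TAKEN is or'ed in too.
 */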

static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
{
	/* Check if we have a RIL-b or RIL-c format instruction which
	 * we need to modify in order to avoid instruction emulation. */
	switch (insn[0] >> 8) {
	case 0xc0:
		if ((insn[0] & 0x0f) == 0x00) /* larl */
			return true;
		break;
	case 0xc4:
		switch (insn[0] & 0x0f) {
		case 0x02: /* llhrl  */
		case 0x04: /* lghrl  */
		case 0x05: /* lhrl   */
		case 0x06: /* llghrl */
		case 0x07: /* sthrl  */
		case 0x08: /* lgrl   */
		case 0x0b: /* stgrl  */
		case 0x0c: /* lgfrl  */
		case 0x0d: /* lrl    */
		case 0x0e: /* llgfrl */
		case 0x0f: /* strl   */
			return true;
		}
		break;
	case 0xc6:
		switch (insn[0] & 0x0f) {
		case 0x02: /* pfdrl  */
		case 0x04: /* cghrl  */
		case 0x05: /* chrl   */
		case 0x06: /* clghrl */
		case 0x07: /* clhrl  */
		case 0x08: /* cgrl   */
		case 0x0a: /* clgrl  */
		case 0x0c: /* cgfrl  */
		case 0x0d: /* crl    */
		case 0x0e: /* clgfrl */
		case 0x0f: /* clrl   */
			return true;
		}
		break;
	}
	return false;
}

static void __kprobes copy_instruction(struct kprobe *p)
{
	s64 disp, new_disp;
	u64 addr, new_addr;

	memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
	if (!is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
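
/*
 * Worked example with made-up addresses: a larl at 0x10000 with a
 * halfword displacement of 0x200 targets 0x10000 + 2 * 0x200 = 0x10400.
 * With the insn slot at 0x20000 the patched displacement becomes
 * ((0x10000 + 0x400) - 0x20000) / 2 = -0x7e00, and the copy still
 * resolves to 0x20000 + 2 * -0x7e00 = 0x10400, the original target.
 */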

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}

static int __kprobes s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}

static void __kprobes s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	p->opcode = *p->addr;
	copy_instruction(p);
	return 0;
}

struct ins_replace_args {
	kprobe_opcode_t *ptr;
	kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
	kcb->kprobe_status = status;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}

static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
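
/*
 * Reading of the above (not from the original comments): "single step"
 * on s390 is built from PER instruction-fetch events. With
 * start == end == ip the event fires exactly when the out-of-line copy
 * at ip is executed, and it reaches post_kprobe_handler() via the
 * DIE_SSTEP notifier below.
 */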

static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__get_cpu_var(current_kprobe) = p;
}

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}

static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
					   struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
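	/*
	 * Illustration (hypothetical call chain): if f() calls g() and both
	 * have return probes, g()'s instance sits at the head of the list
	 * when g() returns; f()'s instance, deeper on the call stack, is
	 * only consumed once f() itself returns through the trampoline.
	 */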
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
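
/*
 * Example of the fixups above (hypothetical): after single stepping a
 * copy of "basr %r14,%r5", %r14 holds the address following the copy in
 * the insn slot. FIXUP_RETURN_REGISTER adds p->addr - p->ainsn.insn to
 * %r14 so the saved return address points back into the original code.
 */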

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw address back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault, this could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}

static void __used __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}