Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Kernel Probes (KProbes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 17 | * |
| 18 | * Copyright (C) IBM Corporation, 2002, 2004 |
| 19 | * |
| 20 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel |
| 21 | * Probes initial implementation ( includes contributions from |
| 22 | * Rusty Russell). |
| 23 | * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes |
| 24 | * interface to access function arguments. |
| 25 | * 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port |
| 26 | * for PPC64 |
| 27 | */ |
| 28 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | #include <linux/kprobes.h> |
| 30 | #include <linux/ptrace.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 31 | #include <linux/preempt.h> |
Paul Gortmaker | 8a39b05 | 2016-08-16 10:57:34 -0400 | [diff] [blame] | 32 | #include <linux/extable.h> |
Christoph Hellwig | 1eeb66a | 2007-05-08 00:27:03 -0700 | [diff] [blame] | 33 | #include <linux/kdebug.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 34 | #include <linux/slab.h> |
Michael Ellerman | 2f0143c | 2014-06-23 13:23:31 +1000 | [diff] [blame] | 35 | #include <asm/code-patching.h> |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 36 | #include <asm/cacheflush.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 37 | #include <asm/sstep.h> |
Naveen N. Rao | 7aa5b01 | 2017-04-19 20:59:51 +0530 | [diff] [blame] | 38 | #include <asm/sections.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 39 | #include <linux/uaccess.h> |
Kumar Gala | f827962 | 2008-06-26 02:01:37 -0500 | [diff] [blame] | 40 | |
/* The kprobe currently being handled on this CPU (NULL when idle). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-CPU saved state (status, saved MSR, previous probe) for nesting. */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* No arch-specific kretprobe blacklist entries; NULL-terminated sentinel only. */
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
| 45 | |
Naveen N. Rao | c05b8c4 | 2017-06-01 16:18:17 +0530 | [diff] [blame] | 46 | int is_current_kprobe_addr(unsigned long addr) |
| 47 | { |
| 48 | struct kprobe *p = kprobe_running(); |
| 49 | return (p && (unsigned long)p->addr == addr) ? 1 : 0; |
| 50 | } |
| 51 | |
Naveen N. Rao | 7aa5b01 | 2017-04-19 20:59:51 +0530 | [diff] [blame] | 52 | bool arch_within_kprobe_blacklist(unsigned long addr) |
| 53 | { |
| 54 | return (addr >= (unsigned long)__kprobes_text_start && |
| 55 | addr < (unsigned long)__kprobes_text_end) || |
| 56 | (addr >= (unsigned long)_stext && |
| 57 | addr < (unsigned long)__head_end); |
| 58 | } |
| 59 | |
/*
 * Resolve a symbol name (optionally in "<module>:<symbol>" form) to the
 * address kprobes should actually probe, accounting for the ELF ABI.
 *
 * @name:   symbol name supplied by the user
 * @offset: offset from the symbol at which the probe is placed; a
 *          non-zero offset suppresses the entry-point adjustment
 *
 * Returns the probe address, or NULL if the symbol cannot be resolved.
 */
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			/* No ftrace site: fall back to the local entry point */
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;
	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			/* Symbol part already dotted (or empty): copy as-is */
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		/* Plain symbol without a leading dot: prepend one */
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		/* Already a dot symbol: use verbatim */
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup	*/
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	/* ppc32: the symbol address is the probe address, no adjustment */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}
| 130 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 131 | int arch_prepare_kprobe(struct kprobe *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 132 | { |
Ananth N Mavinakayanahalli | 63224d1e8 | 2005-06-08 15:49:41 -0700 | [diff] [blame] | 133 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 134 | kprobe_opcode_t insn = *p->addr; |
| 135 | |
Ananth N Mavinakayanahalli | 63224d1e8 | 2005-06-08 15:49:41 -0700 | [diff] [blame] | 136 | if ((unsigned long)p->addr & 0x03) { |
| 137 | printk("Attempt to register kprobe at an unaligned address\n"); |
| 138 | ret = -EINVAL; |
Kumar Gala | 8209003 | 2007-02-06 22:55:19 -0600 | [diff] [blame] | 139 | } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { |
| 140 | printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); |
Ananth N Mavinakayanahalli | 63224d1e8 | 2005-06-08 15:49:41 -0700 | [diff] [blame] | 141 | ret = -EINVAL; |
| 142 | } |
Ananth N Mavinakayanahalli | 9ec4b1f | 2005-06-27 15:17:01 -0700 | [diff] [blame] | 143 | |
Kumar Gala | f827962 | 2008-06-26 02:01:37 -0500 | [diff] [blame] | 144 | /* insn must be on a special executable page on ppc64. This is |
| 145 | * not explicitly required on ppc32 (right now), but it doesn't hurt */ |
Ananth N Mavinakayanahalli | 9ec4b1f | 2005-06-27 15:17:01 -0700 | [diff] [blame] | 146 | if (!ret) { |
Ananth N Mavinakayanahalli | 2d8ab6a | 2005-10-01 13:14:17 -0400 | [diff] [blame] | 147 | p->ainsn.insn = get_insn_slot(); |
Ananth N Mavinakayanahalli | 9ec4b1f | 2005-06-27 15:17:01 -0700 | [diff] [blame] | 148 | if (!p->ainsn.insn) |
| 149 | ret = -ENOMEM; |
| 150 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 151 | |
Anil S Keshavamurthy | 49a2a1b | 2006-01-09 20:52:43 -0800 | [diff] [blame] | 152 | if (!ret) { |
Ananth N Mavinakayanahalli | e6349a95 | 2007-04-18 15:57:51 +1000 | [diff] [blame] | 153 | memcpy(p->ainsn.insn, p->addr, |
| 154 | MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
Anil S Keshavamurthy | 49a2a1b | 2006-01-09 20:52:43 -0800 | [diff] [blame] | 155 | p->opcode = *p->addr; |
Ananth N Mavinakayanahalli | 83db3dd | 2006-08-11 17:01:34 +0530 | [diff] [blame] | 156 | flush_icache_range((unsigned long)p->ainsn.insn, |
| 157 | (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); |
Anil S Keshavamurthy | 49a2a1b | 2006-01-09 20:52:43 -0800 | [diff] [blame] | 158 | } |
| 159 | |
Ananth N Mavinakayanahalli | e6349a95 | 2007-04-18 15:57:51 +1000 | [diff] [blame] | 160 | p->ainsn.boostable = 0; |
Anil S Keshavamurthy | 49a2a1b | 2006-01-09 20:52:43 -0800 | [diff] [blame] | 161 | return ret; |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 162 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 163 | NOKPROBE_SYMBOL(arch_prepare_kprobe); |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 164 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 165 | void arch_arm_kprobe(struct kprobe *p) |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 166 | { |
| 167 | *p->addr = BREAKPOINT_INSTRUCTION; |
| 168 | flush_icache_range((unsigned long) p->addr, |
| 169 | (unsigned long) p->addr + sizeof(kprobe_opcode_t)); |
| 170 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 171 | NOKPROBE_SYMBOL(arch_arm_kprobe); |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 172 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 173 | void arch_disarm_kprobe(struct kprobe *p) |
Rusty Lynch | 7e1048b | 2005-06-23 00:09:25 -0700 | [diff] [blame] | 174 | { |
| 175 | *p->addr = p->opcode; |
| 176 | flush_icache_range((unsigned long) p->addr, |
| 177 | (unsigned long) p->addr + sizeof(kprobe_opcode_t)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 178 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 179 | NOKPROBE_SYMBOL(arch_disarm_kprobe); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 180 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 181 | void arch_remove_kprobe(struct kprobe *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 182 | { |
Masami Hiramatsu | 1294156 | 2009-01-06 14:41:50 -0800 | [diff] [blame] | 183 | if (p->ainsn.insn) { |
| 184 | free_insn_slot(p->ainsn.insn, 0); |
| 185 | p->ainsn.insn = NULL; |
| 186 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 187 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 188 | NOKPROBE_SYMBOL(arch_remove_kprobe); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 189 | |
/*
 * Arrange for the CPU to single-step the copy of the probed instruction
 * held in the probe's out-of-line instruction slot.
 */
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * if the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}
| 202 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 203 | static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 204 | { |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 205 | kcb->prev_kprobe.kp = kprobe_running(); |
| 206 | kcb->prev_kprobe.status = kcb->kprobe_status; |
| 207 | kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 208 | } |
| 209 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 210 | static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 211 | { |
Christoph Lameter | 69111ba | 2014-10-21 15:23:25 -0500 | [diff] [blame] | 212 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 213 | kcb->kprobe_status = kcb->prev_kprobe.status; |
| 214 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; |
| 215 | } |
| 216 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 217 | static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 218 | struct kprobe_ctlblk *kcb) |
| 219 | { |
Christoph Lameter | 69111ba | 2014-10-21 15:23:25 -0500 | [diff] [blame] | 220 | __this_cpu_write(current_kprobe, p); |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 221 | kcb->kprobe_saved_msr = regs->msr; |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 222 | } |
| 223 | |
/*
 * Can a probe placed at @offset from a function's start still be
 * considered "at function entry"?
 *
 * Under ELF ABIv2 the global entry point precedes the local entry point,
 * so small non-zero offsets still count: up to 16 bytes when the ftrace
 * site may be used (KPROBES_ON_FTRACE), otherwise up to 8.  On other
 * ABIs only offset 0 qualifies.
 */
bool arch_function_offset_within_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}
| 236 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 237 | void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 238 | { |
Christoph Hellwig | 4c4308c | 2007-05-08 00:34:14 -0700 | [diff] [blame] | 239 | ri->ret_addr = (kprobe_opcode_t *)regs->link; |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 240 | |
Christoph Hellwig | 4c4308c | 2007-05-08 00:34:14 -0700 | [diff] [blame] | 241 | /* Replace the return addr with trampoline addr */ |
| 242 | regs->link = (unsigned long)kretprobe_trampoline; |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 243 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 244 | NOKPROBE_SYMBOL(arch_prepare_kretprobe); |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 245 | |
/*
 * Try to emulate the probed instruction in software ("boosting") rather
 * than single-stepping it.
 *
 * Returns > 0 if emulation succeeded (regs->nip already advanced),
 * 0 if the instruction must be single-stepped instead.  A negative
 * return from emulate_step() indicates an instruction we should never
 * have allowed a probe on, and is treated as a BUG.
 */
int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, its still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0)
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
Naveen N. Rao | 1cabd2f | 2017-04-19 18:21:04 +0530 | [diff] [blame] | 275 | |
/*
 * Main breakpoint-exception entry point for kprobes.
 *
 * Called from the trap handler when a breakpoint instruction is hit.
 * Handles three cases: reentrant hits (a probe fired while another was
 * being processed), jprobe break_handler continuation, and a fresh probe
 * hit (run pre_handler, then emulate or single-step the original insn).
 *
 * Returns 1 if the exception was consumed by kprobes, 0 to let the
 * kernel handle it.  Preemption is disabled for the whole of kprobe
 * processing and re-enabled on every exit path that completes it.
 */
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/* Kprobes only apply to kernel text */
	if (user_mode(regs))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				/* Emulation avoids a nested single-step */
				ret = try_to_emulate(p, regs);

				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			prepare_singlestep(p, regs);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				/* jprobe continuation: single-step unless
				 * the ftrace path already handled it */
				if (!skip_singlestep(p, regs, kcb))
					goto ss_probe;
				ret = 1;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			/* Emulated: run post_handler and finish here */
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 402 | |
/*
 * Function return probe trampoline:
 * 	- init_kprobes() establishes a probepoint here
 * 	- When the probed function returns, this probe
 * 		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n" /* probepoint site: replaced with a breakpoint at init */
	"blr\n" /* return via LR, restored by trampoline_probe_handler() */
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 415 | |
/*
 * Called when the probe at kretprobe trampoline is hit
 *
 * Walks the current task's kretprobe instances, fires each user
 * handler, recovers the real return address and redirects execution
 * there.  Always returns 1: the breakpoint is fully consumed and
 * preemption has been re-enabled here.
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more than one return
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		/* Move the instance to empty_rp for freeing after unlock */
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;
	/*
	 * Make LR point to the orig_ret_address.
	 * When the 'nop' inside the kretprobe_trampoline
	 * is optimized, we can do a 'blr' after executing the
	 * detour buffer code.
	 */
	regs->link = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* Free recycled instances outside the hash lock */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 489 | |
| 490 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 491 | * Called after single-stepping. p->addr is the address of the |
| 492 | * instruction whose first byte has been replaced by the "breakpoint" |
| 493 | * instruction. To avoid the SMP problems that can occur when we |
| 494 | * temporarily put back the original opcode to single-step, we |
| 495 | * single-stepped a copy of the instruction. The address of this |
| 496 | * copy is p->ainsn.insn. |
| 497 | */ |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 498 | int kprobe_post_handler(struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 499 | { |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 500 | struct kprobe *cur = kprobe_running(); |
| 501 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 502 | |
Naveen N. Rao | 6cc89ba | 2016-11-21 22:36:41 +0530 | [diff] [blame] | 503 | if (!cur || user_mode(regs)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 504 | return 0; |
| 505 | |
Kumar Gala | b76e59d | 2008-06-26 01:57:58 -0500 | [diff] [blame] | 506 | /* make sure we got here for instruction we have a kprobe on */ |
| 507 | if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) |
| 508 | return 0; |
| 509 | |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 510 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
| 511 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
| 512 | cur->post_handler(cur, regs, 0); |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 513 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 514 | |
Ananth N Mavinakayanahalli | db97bc7 | 2010-05-27 19:19:20 +0000 | [diff] [blame] | 515 | /* Adjust nip to after the single-stepped instruction */ |
| 516 | regs->nip = (unsigned long)cur->addr + 4; |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 517 | regs->msr |= kcb->kprobe_saved_msr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 518 | |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 519 | /*Restore back the original saved kprobes variables and continue. */ |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 520 | if (kcb->kprobe_status == KPROBE_REENTER) { |
| 521 | restore_previous_kprobe(kcb); |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 522 | goto out; |
| 523 | } |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 524 | reset_current_kprobe(); |
Prasanna S Panchamukhi | 42cc206 | 2005-06-23 00:09:38 -0700 | [diff] [blame] | 525 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 526 | preempt_enable_no_resched(); |
| 527 | |
| 528 | /* |
| 529 | * if somebody else is singlestepping across a probe point, msr |
Kumar Gala | f827962 | 2008-06-26 02:01:37 -0500 | [diff] [blame] | 530 | * will have DE/SE set, in which case, continue the remaining processing |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 531 | * of do_debug, as if this is not a probe hit. |
| 532 | */ |
Kumar Gala | f827962 | 2008-06-26 02:01:37 -0500 | [diff] [blame] | 533 | if (regs->msr & MSR_SINGLESTEP) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 534 | return 0; |
| 535 | |
| 536 | return 1; |
| 537 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 538 | NOKPROBE_SYMBOL(kprobe_post_handler); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 539 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 540 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 541 | { |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 542 | struct kprobe *cur = kprobe_running(); |
| 543 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 544 | const struct exception_table_entry *entry; |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 545 | |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 546 | switch(kcb->kprobe_status) { |
| 547 | case KPROBE_HIT_SS: |
| 548 | case KPROBE_REENTER: |
| 549 | /* |
| 550 | * We are here because the instruction being single |
| 551 | * stepped caused a page fault. We reset the current |
| 552 | * kprobe and the nip points back to the probe address |
| 553 | * and allow the page fault handler to continue as a |
| 554 | * normal page fault. |
| 555 | */ |
| 556 | regs->nip = (unsigned long)cur->addr; |
Kumar Gala | f827962 | 2008-06-26 02:01:37 -0500 | [diff] [blame] | 557 | regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */ |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 558 | regs->msr |= kcb->kprobe_saved_msr; |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 559 | if (kcb->kprobe_status == KPROBE_REENTER) |
| 560 | restore_previous_kprobe(kcb); |
| 561 | else |
| 562 | reset_current_kprobe(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 563 | preempt_enable_no_resched(); |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 564 | break; |
| 565 | case KPROBE_HIT_ACTIVE: |
| 566 | case KPROBE_HIT_SSDONE: |
| 567 | /* |
| 568 | * We increment the nmissed count for accounting, |
Anoop Thomas Mathew | 23d6d3d | 2013-09-20 09:25:41 +0530 | [diff] [blame] | 569 | * we can also use npre/npostfault count for accounting |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 570 | * these specific fault cases. |
| 571 | */ |
| 572 | kprobes_inc_nmissed_count(cur); |
| 573 | |
| 574 | /* |
| 575 | * We come here because instructions in the pre/post |
| 576 | * handler caused the page_fault, this could happen |
| 577 | * if handler tries to access user space by |
| 578 | * copy_from_user(), get_user() etc. Let the |
| 579 | * user-specified handler try to fix it first. |
| 580 | */ |
| 581 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) |
| 582 | return 1; |
| 583 | |
| 584 | /* |
| 585 | * In case the user-specified fault handler returned |
| 586 | * zero, try to fix up. |
| 587 | */ |
| 588 | if ((entry = search_exception_tables(regs->nip)) != NULL) { |
Nicholas Piggin | 61a92f7 | 2016-10-14 16:47:31 +1100 | [diff] [blame] | 589 | regs->nip = extable_fixup(entry); |
Prasanna S Panchamukhi | 50e21f2 | 2006-03-26 01:38:24 -0800 | [diff] [blame] | 590 | return 1; |
| 591 | } |
| 592 | |
| 593 | /* |
| 594 | * fixup_exception() could not handle it, |
| 595 | * Let do_page_fault() fix it. |
| 596 | */ |
| 597 | break; |
| 598 | default: |
| 599 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 600 | } |
| 601 | return 0; |
| 602 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 603 | NOKPROBE_SYMBOL(kprobe_fault_handler); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 604 | |
Michael Ellerman | 3d7e338 | 2007-07-19 01:48:11 -0700 | [diff] [blame] | 605 | unsigned long arch_deref_entry_point(void *entry) |
| 606 | { |
Michael Ellerman | 2f0143c | 2014-06-23 13:23:31 +1000 | [diff] [blame] | 607 | return ppc_global_function_entry(entry); |
Michael Ellerman | 3d7e338 | 2007-07-19 01:48:11 -0700 | [diff] [blame] | 608 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 609 | NOKPROBE_SYMBOL(arch_deref_entry_point); |
Michael Ellerman | 3d7e338 | 2007-07-19 01:48:11 -0700 | [diff] [blame] | 610 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 611 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 612 | { |
| 613 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 614 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 615 | |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 616 | memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 617 | |
| 618 | /* setup return addr to the jprobe handler routine */ |
Michael Ellerman | 3d7e338 | 2007-07-19 01:48:11 -0700 | [diff] [blame] | 619 | regs->nip = arch_deref_entry_point(jp->entry); |
Michael Ellerman | f55d966 | 2016-06-06 22:26:10 +0530 | [diff] [blame] | 620 | #ifdef PPC64_ELF_ABI_v2 |
Michael Ellerman | 2f0143c | 2014-06-23 13:23:31 +1000 | [diff] [blame] | 621 | regs->gpr[12] = (unsigned long)jp->entry; |
Michael Ellerman | f55d966 | 2016-06-06 22:26:10 +0530 | [diff] [blame] | 622 | #elif defined(PPC64_ELF_ABI_v1) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 623 | regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); |
Kumar Gala | 8209003 | 2007-02-06 22:55:19 -0600 | [diff] [blame] | 624 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 625 | |
Naveen N. Rao | a9f8553 | 2017-06-01 16:18:15 +0530 | [diff] [blame] | 626 | /* |
| 627 | * jprobes use jprobe_return() which skips the normal return |
| 628 | * path of the function, and this messes up the accounting of the |
| 629 | * function graph tracer. |
| 630 | * |
| 631 | * Pause function graph tracing while performing the jprobe function. |
| 632 | */ |
| 633 | pause_graph_tracing(); |
| 634 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 635 | return 1; |
| 636 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 637 | NOKPROBE_SYMBOL(setjmp_pre_handler); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 638 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 639 | void __used jprobe_return(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 640 | { |
| 641 | asm volatile("trap" ::: "memory"); |
| 642 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 643 | NOKPROBE_SYMBOL(jprobe_return); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 644 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 645 | static void __used jprobe_return_end(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 646 | { |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 647 | } |
| 648 | NOKPROBE_SYMBOL(jprobe_return_end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 649 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 650 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 651 | { |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 652 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 653 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 654 | /* |
| 655 | * FIXME - we should ideally be validating that we got here 'cos |
| 656 | * of the "trap" in jprobe_return() above, before restoring the |
| 657 | * saved regs... |
| 658 | */ |
Ananth N Mavinakayanahalli | 0dc036c | 2005-11-07 01:00:10 -0800 | [diff] [blame] | 659 | memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); |
Naveen N. Rao | a9f8553 | 2017-06-01 16:18:15 +0530 | [diff] [blame] | 660 | /* It's OK to start function graph tracing again */ |
| 661 | unpause_graph_tracing(); |
Ananth N Mavinakayanahalli | d217d54 | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 662 | preempt_enable_no_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 663 | return 1; |
| 664 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 665 | NOKPROBE_SYMBOL(longjmp_break_handler); |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 666 | |
/*
 * Probe planted on the kretprobe trampoline itself, so that a probed
 * function returning through the trampoline fires
 * trampoline_probe_handler().
 */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
| 671 | |
Rusty Lynch | 6772926 | 2005-07-05 18:54:50 -0700 | [diff] [blame] | 672 | int __init arch_init_kprobes(void) |
Rusty Lynch | 97f7943 | 2005-06-27 15:17:15 -0700 | [diff] [blame] | 673 | { |
| 674 | return register_kprobe(&trampoline_p); |
| 675 | } |
Ananth N Mavinakayanahalli | bf8f6e5b | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 676 | |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 677 | int arch_trampoline_kprobe(struct kprobe *p) |
Ananth N Mavinakayanahalli | bf8f6e5b | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 678 | { |
| 679 | if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) |
| 680 | return 1; |
| 681 | |
| 682 | return 0; |
| 683 | } |
Naveen N. Rao | 71f6e58 | 2017-04-12 16:48:51 +0530 | [diff] [blame] | 684 | NOKPROBE_SYMBOL(arch_trampoline_kprobe); |