/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

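/*
 * Insn slots for probes in the kernel image are allocated from GFP_DMA
 * pages, i.e. memory below 2GB, so that the out-of-line copy stays
 * within the same 2GB area as the original instruction and pc-relative
 * displacements can be fixed up (see copy_instruction below).
 */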
DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
	free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

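/*
 * Copy the probed instruction to the out-of-line slot. An instruction
 * managed by ftrace is replaced with the ftrace nop so kprobes never
 * sees the patched mcount call, and pc-relative (RIL) instructions get
 * their displacement rewritten relative to the slot.
 */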
static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
		 * in case of hotpatch.
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32-bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

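/*
 * Override of the weak generic version: returning 0 allows kprobes on
 * ftrace-managed locations, which copy_instruction() and
 * swap_instruction() handle explicitly on s390.
 */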
int arch_check_ftrace_location(struct kprobe *p)
{
	return 0;
}

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

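/*
 * Swap the breakpoint instruction in or out at the probe address.
 * Runs via stop_machine() so no other CPU executes the probed code
 * while it is modified. For ftrace-managed locations the full six
 * byte instruction is rewritten, otherwise only the two byte opcode.
 */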
static int swap_instruction(void *data)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	kcb->kprobe_status = KPROBE_SWAP_INST;
	s390_kernel_write(p->addr, &new_insn, len);
	kcb->kprobe_status = status;
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

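/*
 * Arm/disarm by swapping the probed instruction under stop_machine(),
 * so no CPU can be executing it concurrently.
 */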
void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

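/*
 * Single stepping uses the PER (Program Event Recording) facility:
 * control registers %cr9-%cr11 are set up for an instruction-fetch
 * event on exactly the address of the instruction copy, while I/O and
 * external interrupts are masked for the duration of the step.
 */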
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turning on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer in current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

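/*
 * Breakpoint trap entry point, reached via DIE_BPT. The psw address
 * already points past the two byte breakpoint instruction, hence the
 * "- 2" in the kprobe lookup below.
 */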
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fix things up and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, the psw
	 * mask will have PER set. In that case continue the remaining
	 * processing of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the psw address points back to the
		 * probe address and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

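/*
 * Entry point from the page fault handler. Interrupts are disabled
 * around kprobe_trap_handler() if the faulting context had them
 * enabled, and restored afterwards with the PER bit cleared.
 */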
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

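/*
 * jprobe support: setjmp_pre_handler diverts execution to the jprobe
 * entry with interrupts masked and saves the registers plus the used
 * part of the stack; jprobe_return executes the breakpoint instruction
 * (0x0002), and longjmp_break_handler then restores registers and
 * stack to resume at the original probe point.
 */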
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);