/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/*
 * We need separate register/unregister and mmap/munmap lock hashes because
 * of mmap_sem nesting.
 *
 * uprobe_register() needs to install probes on (potentially) all processes
 * and thus needs to acquire multiple mmap_sems (consecutively, not
 * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
 * for the particular process doing the mmap.
 *
 * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
 * because of lock order against i_mmap_mutex. This means there's a hole in
 * the register vma iteration where a mmap() can happen.
 *
 * Thus uprobe_register() can race with uprobe_mmap() and we can try to
 * install a probe where one is already installed.
 */

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/*
 * uprobe_events allows us to skip uprobe_mmap() if there are no uprobe
 * events active at this time. Probably a fine-grained per-inode count
 * would be better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 * - is_register: indicates if we are in register context.
 * - Return true if the specified virtual address is in an
 *   executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
				== (VM_READ|VM_EXEC))
		return true;

	return false;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

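/*
 * Worked example (illustrative, hypothetical numbers): with
 * vma->vm_start == 0x400000, vma->vm_pgoff == 2 and PAGE_SIZE == 4096,
 * file offset 0x2010 maps to the virtual address
 * 0x400000 + 0x2010 - (2 << 12) = 0x400010, and vaddr_to_offset()
 * inverts that: (2 << 12) + (0x400010 - 0x400000) = 0x2010.
 */
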
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the cowed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
unlock:
	unlock_page(page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify read_opcode / write_opcode
 * accordingly. This would never be a problem for archs that have fixed
 * length instructions.
 */

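/*
 * On x86, for instance, this assumption holds: UPROBE_SWBP_INSN is the
 * one-byte int3 opcode (0xcc), which is as small as x86 instructions get.
 */
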
/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

put_new:
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (ret <= 0)
		return ret;

	lock_page(page);
	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);
	unlock_page(page);

	put_page(page);

	return 0;
}

static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	if (current->mm == mm) {
		pagefault_disable();
		result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
							sizeof(opcode));
		pagefault_enable();

		if (likely(result == 0))
			goto out;
	}

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;
out:
	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;
	/*
	 * See the comment near uprobes_hash().
	 */
	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return -EEXIST;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 * @verify: if true, verify existence of breakpoint instruction.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
{
	if (verify) {
		int result;

		result = is_swbp_at_addr(mm, vaddr);
		if (!result)
			return -EINVAL;

		if (result != 1)
			return result;
	}
	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;
	struct uprobe *u;

	spin_lock_irqsave(&uprobes_treelock, flags);
	u = __insert_uprobe(uprobe);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

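/*
 * Reference-count life cycle, as implemented above (a summary, not new
 * behaviour): insert_uprobe() starts a freshly allocated uprobe at
 * ref == 2 (access + creation), __find_uprobe()/__insert_uprobe() take
 * one more ref per lookup that finds a match, and every ref is dropped
 * through put_uprobe(), which frees the uprobe once the count reaches 0.
 */
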
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc is deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}

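/*
 * Worked example for the page-boundary case above (hypothetical numbers):
 * with PAGE_SIZE == 4096, MAX_UINSN_BYTES == 16 and
 * uprobe->offset == 0xffa, nbytes is 4096 - 0xffa = 6, so the first
 * __copy_insn() call copies the 10 trailing bytes from the second page
 * (starting at file offset 0x1000) and the final call copies the 6
 * leading bytes from the first page.
 */
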
/*
 * How mm->uprobes_state.count gets updated
 * uprobe_mmap() increments the count if
 *	- it successfully adds a breakpoint.
 *	- it cannot add a breakpoint, but sees that there is an underlying
 *	  breakpoint (via an is_swbp_at_addr()).
 *
 * uprobe_munmap() decrements the count if
 *	- it sees an underlying breakpoint (via is_swbp_at_addr).
 *	  (Subsequent uprobe_unregister wouldn't find the breakpoint
 *	  unless a uprobe_mmap kicks in, since the old vma would be
 *	  dropped just after uprobe_munmap.)
 *
 * uprobe_register increments the count if:
 *	- it successfully adds a breakpoint.
 *
 * uprobe_unregister decrements the count if:
 *	- it sees an underlying breakpoint and removes it successfully
 *	  (via is_swbp_at_addr).
 *	  (Subsequent uprobe_munmap wouldn't find the breakpoint
 *	  since there is no underlying breakpoint after the
 *	  breakpoint removal.)
 */
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	int ret;

	/*
	 * If the probe is being deleted, the unregistering thread could
	 * already be done with its vma-rmap walk. Adding a probe now can
	 * be fatal since nobody will be able to clean it up. Also, we
	 * could be on the fork or mremap path, where the probe might
	 * already have been inserted. Hence behave as if the probe
	 * already existed.
	 */
	if (!uprobe->consumers)
		return -EEXIST;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma->vm_file);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -ENOTSUPP;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
		if (ret)
			return ret;

		/* write_opcode() assumes we don't cross page boundary */
		BUG_ON((uprobe->offset & ~PAGE_MASK) +
				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * Ideally, we should be updating the probe count after the
	 * breakpoint has been successfully inserted. However a thread
	 * could hit the breakpoint we just inserted even before the probe
	 * count is incremented. If this is the first breakpoint placed,
	 * the breakpoint notifier might ignore uprobes and pass the trap
	 * to the thread. Hence increment before and decrement on failure.
	 */
	atomic_inc(&mm->uprobes_state.count);
	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (ret)
		atomic_dec(&mm->uprobes_state.count);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
		atomic_dec(&mm->uprobes_state.count);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

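/*
 * Build the list of mm/vaddr pairs that currently map @offset in @mapping.
 * Allocation is two-pass because i_mmap_mutex is held while walking the
 * prio tree: first try GFP_NOWAIT under the mutex and, for every vma we
 * could not get a map_info for, drop the mutex, allocate with GFP_KERNEL,
 * and retry the walk ("again:") until no allocation is missing.
 */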
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info))
		return PTR_ERR(info);

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
			/*
			 * We can race against uprobe_mmap(), see the
			 * comment near uprobe_hash().
			 */
			if (err == -EEXIST)
				err = 0;
		} else {
			remove_breakpoint(uprobe, mm, info->vaddr);
		}
 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}

	return err;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO: can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}

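/*
 * Illustrative usage sketch (hypothetical consumer, not part of this
 * file): a minimal uprobe_consumer only needs a handler; the filter
 * callback is optional.
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler = sample_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 */
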
/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

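/*
 * Example (hypothetical offsets): if @inode has uprobes at offsets
 * 0x100, 0x200 and 0x300, a query with min == 0x150, max == 0x250
 * stops at the 0x200 node. Any node whose offset falls inside
 * [min, max] terminates the walk, not necessarily the leftmost one;
 * callers walk rb_prev()/rb_next() from there.
 */
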
| 972 | /* |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 973 | * For a given range in vma, build a list of probes that need to be inserted. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 974 | */ |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 975 | static void build_probe_list(struct inode *inode, |
| 976 | struct vm_area_struct *vma, |
| 977 | unsigned long start, unsigned long end, |
| 978 | struct list_head *head) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 979 | { |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 980 | loff_t min, max; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 981 | unsigned long flags; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 982 | struct rb_node *n, *t; |
| 983 | struct uprobe *u; |
| 984 | |
| 985 | INIT_LIST_HEAD(head); |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 986 | min = vaddr_to_offset(vma, start); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 987 | max = min + (end - start) - 1; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 988 | |
| 989 | spin_lock_irqsave(&uprobes_treelock, flags); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 990 | n = find_node_in_range(inode, min, max); |
| 991 | if (n) { |
| 992 | for (t = n; t; t = rb_prev(t)) { |
| 993 | u = rb_entry(t, struct uprobe, rb_node); |
| 994 | if (u->inode != inode || u->offset < min) |
| 995 | break; |
| 996 | list_add(&u->pending_list, head); |
| 997 | atomic_inc(&u->ref); |
| 998 | } |
| 999 | for (t = n; (t = rb_next(t)); ) { |
| 1000 | u = rb_entry(t, struct uprobe, rb_node); |
| 1001 | if (u->inode != inode || u->offset > max) |
| 1002 | break; |
| 1003 | list_add(&u->pending_list, head); |
| 1004 | atomic_inc(&u->ref); |
| 1005 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1006 | } |
| 1007 | spin_unlock_irqrestore(&uprobes_treelock, flags); |
| 1008 | } |
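| |
| | /*
| |  * Worked example for the [min, max] range above (illustrative numbers
| |  * only): vaddr_to_offset(), defined earlier in this file, maps a user
| |  * address back to a file offset,
| |  * ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start).
| |  * With vm_pgoff == 2, PAGE_SHIFT == 12, vm_start == 0x400000,
| |  * start == 0x400010 and end == 0x401000:
| |  *
| |  *	min = 0x2000 + 0x10		= 0x2010
| |  *	max = 0x2010 + 0xff0 - 1	= 0x2fff
| |  *
| |  * i.e. exactly the file offsets backed by [start, end).
| |  */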
| 1009 | |
| 1010 | /* |
| 1011 | * Called from mmap_region.
| 1012 | * Called with mm->mmap_sem acquired.
| 1013 | *
| 1014 | * Return a negative errno if we fail to insert probes and we cannot
| 1015 | * bail out.
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1016 | * Return 0 otherwise, i.e. on:
| 1017 | *
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1018 | * - successful insertion of probes,
| 1019 | * - (or) no possible probes to be inserted,
| 1020 | * - (or) insertion of probes failed but we can bail out.
| 1021 | */ |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1022 | int uprobe_mmap(struct vm_area_struct *vma) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1023 | { |
| 1024 | struct list_head tmp_list; |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1025 | struct uprobe *uprobe, *u; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1026 | struct inode *inode; |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1027 | int ret, count; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1028 | |
| 1029 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1030 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1031 | |
| 1032 | inode = vma->vm_file->f_mapping->host; |
| 1033 | if (!inode) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1034 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1035 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1036 | mutex_lock(uprobes_mmap_hash(inode)); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1037 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1038 | |
| 1039 | ret = 0; |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1040 | count = 0; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1041 | |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1042 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1043 | if (!ret) { |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1044 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1045 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1046 | ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); |
Peter Zijlstra | c5784de | 2012-06-15 17:43:39 +0200 | [diff] [blame] | 1047 | /* |
| 1048 | * We can race against uprobe_register(), see the |
| 1049 | * comment near uprobe_hash(). |
| 1050 | */ |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1051 | if (ret == -EEXIST) { |
| 1052 | ret = 0; |
| 1053 | |
| 1054 | if (!is_swbp_at_addr(vma->vm_mm, vaddr)) |
| 1055 | continue; |
| 1056 | |
| 1057 | /* |
| 1058 | * Unable to insert a breakpoint, but a
| 1059 | * breakpoint already lies underneath. Increment
| 1060 | * the probe count.
| 1061 | */ |
| 1062 | atomic_inc(&vma->vm_mm->uprobes_state.count); |
| 1063 | } |
| 1064 | |
| 1065 | if (!ret) |
| 1066 | count++; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1067 | } |
| 1068 | put_uprobe(uprobe); |
| 1069 | } |
| 1070 | |
| 1071 | mutex_unlock(uprobes_mmap_hash(inode)); |
| 1072 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1073 | if (ret) |
| 1074 | atomic_sub(count, &vma->vm_mm->uprobes_state.count); |
| 1075 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1076 | return ret; |
| 1077 | } |
| 1078 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1079 | /* |
| 1080 | * Called in the context of a munmap of a vma.
| 1081 | */ |
Srikar Dronamraju | cbc91f7 | 2012-04-11 16:05:27 +0530 | [diff] [blame] | 1082 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1083 | { |
| 1084 | struct list_head tmp_list; |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1085 | struct uprobe *uprobe, *u; |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1086 | struct inode *inode; |
| 1087 | |
| 1088 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) |
| 1089 | return; |
| 1090 | |
Oleg Nesterov | 2fd611a | 2012-07-29 20:22:31 +0200 | [diff] [blame] | 1091 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ |
| 1092 | return; |
| 1093 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1094 | if (!atomic_read(&vma->vm_mm->uprobes_state.count)) |
| 1095 | return; |
| 1096 | |
| 1097 | inode = vma->vm_file->f_mapping->host; |
| 1098 | if (!inode) |
| 1099 | return; |
| 1100 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1101 | mutex_lock(uprobes_mmap_hash(inode)); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1102 | build_probe_list(inode, vma, start, end, &tmp_list); |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1103 | |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1104 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1105 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1106 | /* |
| 1107 | * An unregister could have removed the probe before |
| 1108 | * unmap. So check before we decrement the count. |
| 1109 | */ |
| 1110 | if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1) |
| 1111 | atomic_dec(&vma->vm_mm->uprobes_state.count); |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1112 | put_uprobe(uprobe); |
| 1113 | } |
| 1114 | mutex_unlock(uprobes_mmap_hash(inode)); |
| 1115 | } |
| 1116 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1117 | /* Slot allocation for XOL */ |
| 1118 | static int xol_add_vma(struct xol_area *area) |
| 1119 | { |
| 1120 | struct mm_struct *mm; |
| 1121 | int ret; |
| 1122 | |
| 1123 | area->page = alloc_page(GFP_HIGHUSER); |
| 1124 | if (!area->page) |
| 1125 | return -ENOMEM; |
| 1126 | |
| 1127 | ret = -EALREADY; |
| 1128 | mm = current->mm; |
| 1129 | |
| 1130 | down_write(&mm->mmap_sem); |
| 1131 | if (mm->uprobes_state.xol_area) |
| 1132 | goto fail; |
| 1133 | |
| 1134 | ret = -ENOMEM; |
| 1135 | |
| 1136 | /* Try to map as high as possible, this is only a hint. */ |
| 1137 | area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0); |
| 1138 | if (area->vaddr & ~PAGE_MASK) { |
| 1139 | ret = area->vaddr; |
| 1140 | goto fail; |
| 1141 | } |
| 1142 | |
| 1143 | ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE, |
| 1144 | VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page); |
| 1145 | if (ret) |
| 1146 | goto fail; |
| 1147 | |
| 1148 | smp_wmb(); /* pairs with get_xol_area() */ |
| 1149 | mm->uprobes_state.xol_area = area; |
| 1150 | ret = 0; |
| 1151 | |
| 1152 | fail: |
| 1153 | up_write(&mm->mmap_sem); |
| 1154 | if (ret) |
| 1155 | __free_page(area->page); |
| 1156 | |
| 1157 | return ret; |
| 1158 | } |
| 1159 | |
| 1160 | static struct xol_area *get_xol_area(struct mm_struct *mm) |
| 1161 | { |
| 1162 | struct xol_area *area; |
| 1163 | |
| 1164 | area = mm->uprobes_state.xol_area; |
| 1165 | smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */ |
| 1166 | |
| 1167 | return area; |
| 1168 | } |
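| |
| | /*
| |  * The barrier pairing above is the usual pointer-publish pattern:
| |  * xol_add_vma() fully initializes the area, issues smp_wmb(), and only
| |  * then stores mm->uprobes_state.xol_area; get_xol_area() loads the
| |  * pointer and relies on the data-dependency barrier so that
| |  * dereferencing the area observes its initialized contents.
| |  */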
| 1169 | |
| 1170 | /* |
| 1171 | * xol_alloc_area - Allocate the process's xol_area.
| 1172 | * This area will be used for storing instructions for execution out of |
| 1173 | * line. |
| 1174 | * |
| 1175 | * Returns the allocated area or NULL. |
| 1176 | */ |
| 1177 | static struct xol_area *xol_alloc_area(void) |
| 1178 | { |
| 1179 | struct xol_area *area; |
| 1180 | |
| 1181 | area = kzalloc(sizeof(*area), GFP_KERNEL); |
| 1182 | if (unlikely(!area)) |
| 1183 | return NULL; |
| 1184 | |
| 1185 | area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL); |
| 1186 | |
| 1187 | if (!area->bitmap) |
| 1188 | goto fail; |
| 1189 | |
| 1190 | init_waitqueue_head(&area->wq); |
| 1191 | if (!xol_add_vma(area)) |
| 1192 | return area; |
| 1193 | |
| 1194 | fail: |
| 1195 | kfree(area->bitmap); |
| 1196 | kfree(area); |
| 1197 | |
| 1198 | return get_xol_area(current->mm); |
| 1199 | } |
| 1200 | |
| 1201 | /* |
| 1202 | * uprobe_clear_state - Free the area allocated for slots. |
| 1203 | */ |
| 1204 | void uprobe_clear_state(struct mm_struct *mm) |
| 1205 | { |
| 1206 | struct xol_area *area = mm->uprobes_state.xol_area; |
| 1207 | |
| 1208 | if (!area) |
| 1209 | return; |
| 1210 | |
| 1211 | put_page(area->page); |
| 1212 | kfree(area->bitmap); |
| 1213 | kfree(area); |
| 1214 | } |
| 1215 | |
| 1216 | /* |
| 1217 | * uprobe_reset_state - Reset the per-mm uprobes state (xol_area and probe count).
| 1218 | */ |
| 1219 | void uprobe_reset_state(struct mm_struct *mm) |
| 1220 | { |
| 1221 | mm->uprobes_state.xol_area = NULL; |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1222 | atomic_set(&mm->uprobes_state.count, 0); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1223 | } |
| 1224 | |
| 1225 | /* |
| 1226 | * Search the XOL area's bitmap for a free instruction slot, waiting if all slots are busy.
| 1227 | */ |
| 1228 | static unsigned long xol_take_insn_slot(struct xol_area *area) |
| 1229 | { |
| 1230 | unsigned long slot_addr; |
| 1231 | int slot_nr; |
| 1232 | |
| 1233 | do { |
| 1234 | slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); |
| 1235 | if (slot_nr < UINSNS_PER_PAGE) { |
| 1236 | if (!test_and_set_bit(slot_nr, area->bitmap)) |
| 1237 | break; |
| 1238 | |
| 1239 | slot_nr = UINSNS_PER_PAGE; |
| 1240 | continue; |
| 1241 | } |
| 1242 | wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); |
| 1243 | } while (slot_nr >= UINSNS_PER_PAGE); |
| 1244 | |
| 1245 | slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); |
| 1246 | atomic_inc(&area->slot_count); |
| 1247 | |
| 1248 | return slot_addr; |
| 1249 | } |
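| |
| | /*
| |  * Slot arithmetic, with assumed x86-style sizes (PAGE_SIZE == 4096,
| |  * UPROBE_XOL_SLOT_BYTES == 128): UINSNS_PER_PAGE == 32, so slot_nr
| |  * ranges over [0, 31] and slot_addr over
| |  * area->vaddr + {0, 128, 256, ..., 3968}. A sketch only; the exact
| |  * slot size is per-architecture.
| |  */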
| 1250 | |
| 1251 | /* |
| 1252 | * xol_get_insn_slot - allocate the per-mm XOL area if it does not exist
| 1253 | * yet, take a free slot and copy the probed instruction into it.
| 1254 | * Returns the allocated slot address or 0.
| 1255 | */ |
| 1256 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr) |
| 1257 | { |
| 1258 | struct xol_area *area; |
| 1259 | unsigned long offset; |
| 1260 | void *vaddr; |
| 1261 | |
| 1262 | area = get_xol_area(current->mm); |
| 1263 | if (!area) { |
| 1264 | area = xol_alloc_area(); |
| 1265 | if (!area) |
| 1266 | return 0; |
| 1267 | } |
| 1268 | current->utask->xol_vaddr = xol_take_insn_slot(area); |
| 1269 | |
| 1270 | /* |
| 1271 | * Initialize the slot only if xol_vaddr points to a valid
| 1272 | * instruction slot.
| 1273 | */ |
| 1274 | if (unlikely(!current->utask->xol_vaddr)) |
| 1275 | return 0; |
| 1276 | |
| 1277 | current->utask->vaddr = slot_addr; |
| 1278 | offset = current->utask->xol_vaddr & ~PAGE_MASK; |
| 1279 | vaddr = kmap_atomic(area->page); |
| 1280 | memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES); |
| 1281 | kunmap_atomic(vaddr); |
| 1282 | |
| 1283 | return current->utask->xol_vaddr; |
| 1284 | } |
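| |
| | /*
| |  * Note on the copy above: the XOL page was allocated GFP_HIGHUSER, so
| |  * it is mapped transiently with kmap_atomic(); uprobe->arch.insn holds
| |  * the copy of the original instruction saved when the probe was
| |  * installed, and MAX_UINSN_BYTES bytes are copied regardless of the
| |  * actual instruction length.
| |  */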
| 1285 | |
| 1286 | /* |
| 1287 | * xol_free_insn_slot - If a slot was earlier allocated by
| 1288 | * xol_get_insn_slot(), make the slot available for
| 1289 | * subsequent requests.
| 1290 | */ |
| 1291 | static void xol_free_insn_slot(struct task_struct *tsk) |
| 1292 | { |
| 1293 | struct xol_area *area; |
| 1294 | unsigned long vma_end; |
| 1295 | unsigned long slot_addr; |
| 1296 | |
| 1297 | if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) |
| 1298 | return; |
| 1299 | |
| 1300 | slot_addr = tsk->utask->xol_vaddr; |
| 1301 | |
| 1302 | if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr))) |
| 1303 | return; |
| 1304 | |
| 1305 | area = tsk->mm->uprobes_state.xol_area; |
| 1306 | vma_end = area->vaddr + PAGE_SIZE; |
| 1307 | if (area->vaddr <= slot_addr && slot_addr < vma_end) { |
| 1308 | unsigned long offset; |
| 1309 | int slot_nr; |
| 1310 | |
| 1311 | offset = slot_addr - area->vaddr; |
| 1312 | slot_nr = offset / UPROBE_XOL_SLOT_BYTES; |
| 1313 | if (slot_nr >= UINSNS_PER_PAGE) |
| 1314 | return; |
| 1315 | |
| 1316 | clear_bit(slot_nr, area->bitmap); |
| 1317 | atomic_dec(&area->slot_count); |
| 1318 | if (waitqueue_active(&area->wq)) |
| 1319 | wake_up(&area->wq); |
| 1320 | |
| 1321 | tsk->utask->xol_vaddr = 0; |
| 1322 | } |
| 1323 | } |
| 1324 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1325 | /** |
| 1326 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs |
| 1327 | * @regs: Reflects the saved state of the task after it has hit a breakpoint |
| 1328 | * instruction. |
| 1329 | * Return the address of the breakpoint instruction. |
| 1330 | */ |
| 1331 | unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) |
| 1332 | { |
| 1333 | return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; |
| 1334 | } |
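| |
| | /*
| |  * For example, on x86 the software breakpoint is the one-byte int3
| |  * opcode (0xcc), so UPROBE_SWBP_INSN_SIZE is 1 and this computes
| |  * instruction_pointer(regs) - 1.
| |  */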
| 1335 | |
| 1336 | /* |
| 1337 | * Called with no locks held. |
| 1338 | * Called in the context of an exiting or an exec-ing thread.
| 1339 | */ |
| 1340 | void uprobe_free_utask(struct task_struct *t) |
| 1341 | { |
| 1342 | struct uprobe_task *utask = t->utask; |
| 1343 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1344 | if (!utask) |
| 1345 | return; |
| 1346 | |
| 1347 | if (utask->active_uprobe) |
| 1348 | put_uprobe(utask->active_uprobe); |
| 1349 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1350 | xol_free_insn_slot(t); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1351 | kfree(utask); |
| 1352 | t->utask = NULL; |
| 1353 | } |
| 1354 | |
| 1355 | /* |
| 1356 | * Called in the context of a new clone/fork from copy_process.
| 1357 | */ |
| 1358 | void uprobe_copy_process(struct task_struct *t) |
| 1359 | { |
| 1360 | t->utask = NULL; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1361 | } |
| 1362 | |
| 1363 | /* |
| 1364 | * Allocate a uprobe_task object for the task. |
| 1365 | * Called when the thread hits a breakpoint for the first time. |
| 1366 | * |
| 1367 | * Returns: |
| 1368 | * - pointer to new uprobe_task on success |
| 1369 | * - NULL otherwise |
| 1370 | */ |
| 1371 | static struct uprobe_task *add_utask(void) |
| 1372 | { |
| 1373 | struct uprobe_task *utask; |
| 1374 | |
| 1375 | utask = kzalloc(sizeof *utask, GFP_KERNEL); |
| 1376 | if (unlikely(!utask)) |
| 1377 | return NULL; |
| 1378 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1379 | current->utask = utask; |
| 1380 | return utask; |
| 1381 | } |
| 1382 | |
| 1383 | /* Prepare to single-step probed instruction out of line. */ |
| 1384 | static int |
| 1385 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr) |
| 1386 | { |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1387 | if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs)) |
| 1388 | return 0; |
| 1389 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1390 | return -EFAULT; |
| 1391 | } |
| 1392 | |
| 1393 | /* |
| 1394 | * If we are singlestepping, then ensure this thread is not connected to |
| 1395 | * non-fatal signals until completion of singlestep. When xol insn itself |
| 1396 | * triggers the signal, restart the original insn even if the task is |
| 1397 | * already SIGKILL'ed (since coredump should report the correct ip). This |
| 1398 | * is even more important if the task has a handler for SIGSEGV/etc.: the
| 1399 | * _same_ instruction should be repeated again after return from the signal |
| 1400 | * handler, and SSTEP can never finish in this case. |
| 1401 | */ |
| 1402 | bool uprobe_deny_signal(void) |
| 1403 | { |
| 1404 | struct task_struct *t = current; |
| 1405 | struct uprobe_task *utask = t->utask; |
| 1406 | |
| 1407 | if (likely(!utask || !utask->active_uprobe)) |
| 1408 | return false; |
| 1409 | |
| 1410 | WARN_ON_ONCE(utask->state != UTASK_SSTEP); |
| 1411 | |
| 1412 | if (signal_pending(t)) { |
| 1413 | spin_lock_irq(&t->sighand->siglock); |
| 1414 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
| 1415 | spin_unlock_irq(&t->sighand->siglock); |
| 1416 | |
| 1417 | if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { |
| 1418 | utask->state = UTASK_SSTEP_TRAPPED; |
| 1419 | set_tsk_thread_flag(t, TIF_UPROBE); |
| 1420 | set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); |
| 1421 | } |
| 1422 | } |
| 1423 | |
| 1424 | return true; |
| 1425 | } |
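| |
| | /*
| |  * Clearing TIF_SIGPENDING above only defers signal delivery: once the
| |  * single-step completes, handle_singlestep() calls recalc_sigpending()
| |  * under siglock, so any still-pending signal is noticed again.
| |  */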
| 1426 | |
| 1427 | /* |
| 1428 | * Avoid singlestepping the original instruction if the original instruction |
| 1429 | * is a NOP or can be emulated. |
| 1430 | */ |
| 1431 | static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) |
| 1432 | { |
| 1433 | if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) |
| 1434 | return true; |
| 1435 | |
| 1436 | uprobe->flags &= ~UPROBE_SKIP_SSTEP; |
| 1437 | return false; |
| 1438 | } |
| 1439 | |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 1440 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1441 | { |
| 1442 | struct mm_struct *mm = current->mm; |
| 1443 | struct uprobe *uprobe = NULL; |
| 1444 | struct vm_area_struct *vma; |
| 1445 | |
| 1446 | down_read(&mm->mmap_sem); |
| 1447 | vma = find_vma(mm, bp_vaddr); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1448 | if (vma && vma->vm_start <= bp_vaddr) { |
| 1449 | if (valid_vma(vma, false)) { |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 1450 | struct inode *inode = vma->vm_file->f_mapping->host; |
| 1451 | loff_t offset = vaddr_to_offset(vma, bp_vaddr); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1452 | |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1453 | uprobe = find_uprobe(inode, offset); |
| 1454 | } |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 1455 | |
| 1456 | if (!uprobe) |
| 1457 | *is_swbp = is_swbp_at_addr(mm, bp_vaddr); |
| 1458 | } else { |
| 1459 | *is_swbp = -EFAULT; |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1460 | } |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 1461 | up_read(&mm->mmap_sem); |
| 1462 | |
| 1463 | return uprobe; |
| 1464 | } |
| 1465 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1466 | /* |
| 1467 | * Run handler and ask thread to singlestep. |
| 1468 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
| 1469 | */ |
| 1470 | static void handle_swbp(struct pt_regs *regs) |
| 1471 | { |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1472 | struct uprobe_task *utask; |
| 1473 | struct uprobe *uprobe; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1474 | unsigned long bp_vaddr; |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 1475 | int uninitialized_var(is_swbp); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1476 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1477 | bp_vaddr = uprobe_get_swbp_addr(regs); |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 1478 | uprobe = find_active_uprobe(bp_vaddr, &is_swbp); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1479 | |
| 1480 | if (!uprobe) { |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 1481 | if (is_swbp > 0) { |
| 1482 | /* No matching uprobe; signal SIGTRAP. */ |
| 1483 | send_sig(SIGTRAP, current, 0); |
| 1484 | } else { |
| 1485 | /* |
| 1486 | * Either we raced with uprobe_unregister() or we can't |
| 1487 | * access this memory. The latter is only possible if |
| 1488 | * another thread plays with our ->mm. In both cases |
| 1489 | * we can simply restart. If this vma was unmapped we |
| 1490 | * can pretend this insn was not executed yet and get |
| 1491 | * the (correct) SIGSEGV after restart. |
| 1492 | */ |
| 1493 | instruction_pointer_set(regs, bp_vaddr); |
| 1494 | } |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1495 | return; |
| 1496 | } |
| 1497 | |
| 1498 | utask = current->utask; |
| 1499 | if (!utask) { |
| 1500 | utask = add_utask(); |
| 1501 | /* Cannot allocate; re-execute the instruction. */ |
| 1502 | if (!utask) |
| 1503 | goto cleanup_ret; |
| 1504 | } |
| 1505 | utask->active_uprobe = uprobe; |
| 1506 | handler_chain(uprobe, regs); |
| 1507 | if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs)) |
| 1508 | goto cleanup_ret; |
| 1509 | |
| 1510 | utask->state = UTASK_SSTEP; |
| 1511 | if (!pre_ssout(uprobe, regs, bp_vaddr)) { |
| 1512 | user_enable_single_step(current); |
| 1513 | return; |
| 1514 | } |
| 1515 | |
| 1516 | cleanup_ret: |
| 1517 | if (utask) { |
| 1518 | utask->active_uprobe = NULL; |
| 1519 | utask->state = UTASK_RUNNING; |
| 1520 | } |
| 1521 | if (uprobe) { |
| 1522 | if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) |
| 1524 | /* |
| 1525 | * cannot singlestep; cannot skip instruction; |
| 1526 | * re-execute the instruction. |
| 1527 | */ |
| 1528 | instruction_pointer_set(regs, bp_vaddr); |
| 1529 | |
| 1530 | put_uprobe(uprobe); |
| 1531 | } |
| 1532 | } |
| 1533 | |
| 1534 | /* |
| 1535 | * Perform required fix-ups and disable singlestep. |
| 1536 | * Allow pending signals to take effect. |
| 1537 | */ |
| 1538 | static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) |
| 1539 | { |
| 1540 | struct uprobe *uprobe; |
| 1541 | |
| 1542 | uprobe = utask->active_uprobe; |
| 1543 | if (utask->state == UTASK_SSTEP_ACK) |
| 1544 | arch_uprobe_post_xol(&uprobe->arch, regs); |
| 1545 | else if (utask->state == UTASK_SSTEP_TRAPPED) |
| 1546 | arch_uprobe_abort_xol(&uprobe->arch, regs); |
| 1547 | else |
| 1548 | WARN_ON_ONCE(1); |
| 1549 | |
| 1550 | put_uprobe(uprobe); |
| 1551 | utask->active_uprobe = NULL; |
| 1552 | utask->state = UTASK_RUNNING; |
| 1553 | user_disable_single_step(current); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1554 | xol_free_insn_slot(current); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1555 | |
| 1556 | spin_lock_irq(¤t->sighand->siglock); |
| 1557 | recalc_sigpending(); /* see uprobe_deny_signal() */ |
| 1558 | spin_unlock_irq(¤t->sighand->siglock); |
| 1559 | } |
| 1560 | |
| 1561 | /* |
| 1562 | * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
| 1563 | * (on subsequent probe hits on the thread, it also sets the state to
| 1564 | * UTASK_BP_HIT) and allows the thread to return from the interrupt.
| 1565 | *
| 1566 | * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
| 1567 | * flag, sets the state to UTASK_SSTEP_ACK and allows the thread to return
| 1568 | * from the interrupt.
| 1569 | * |
| 1570 | * While returning to userspace, thread notices the TIF_UPROBE flag and calls |
| 1571 | * uprobe_notify_resume(). |
| 1572 | */ |
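| | /*
| |  * A summary of the resulting per-task state machine, as implemented by
| |  * the handlers in this file (not an exhaustive diagram):
| |  *
| |  *	UTASK_RUNNING --bp hit--> UTASK_BP_HIT --pre_ssout()--> UTASK_SSTEP
| |  *	UTASK_SSTEP --step trap--> UTASK_SSTEP_ACK
| |  *	            --arch_uprobe_post_xol()--> UTASK_RUNNING
| |  *	UTASK_SSTEP --fatal signal / xol trap--> UTASK_SSTEP_TRAPPED
| |  *	            --arch_uprobe_abort_xol()--> UTASK_RUNNING
| |  */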
| 1573 | void uprobe_notify_resume(struct pt_regs *regs) |
| 1574 | { |
| 1575 | struct uprobe_task *utask; |
| 1576 | |
| 1577 | utask = current->utask; |
| 1578 | if (!utask || utask->state == UTASK_BP_HIT) |
| 1579 | handle_swbp(regs); |
| 1580 | else |
| 1581 | handle_singlestep(utask, regs); |
| 1582 | } |
| 1583 | |
| 1584 | /* |
| 1585 | * uprobe_pre_sstep_notifier gets called from interrupt context as part of
| 1586 | * the notifier mechanism. Sets the TIF_UPROBE flag and indicates a breakpoint hit.
| 1587 | */ |
| 1588 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) |
| 1589 | { |
| 1590 | struct uprobe_task *utask; |
| 1591 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1592 | if (!current->mm || !atomic_read(¤t->mm->uprobes_state.count)) |
| 1593 | /* task is currently not uprobed */ |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1594 | return 0; |
| 1595 | |
| 1596 | utask = current->utask; |
| 1597 | if (utask) |
| 1598 | utask->state = UTASK_BP_HIT; |
| 1599 | |
| 1600 | set_thread_flag(TIF_UPROBE); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1601 | |
| 1602 | return 1; |
| 1603 | } |
| 1604 | |
| 1605 | /* |
| 1606 | * uprobe_post_sstep_notifier gets called in interrupt context as part of the
| 1607 | * notifier mechanism. Sets the TIF_UPROBE flag and indicates completion of singlestep.
| 1608 | */ |
| 1609 | int uprobe_post_sstep_notifier(struct pt_regs *regs) |
| 1610 | { |
| 1611 | struct uprobe_task *utask = current->utask; |
| 1612 | |
| 1613 | if (!current->mm || !utask || !utask->active_uprobe) |
| 1614 | /* task is currently not uprobed */ |
| 1615 | return 0; |
| 1616 | |
| 1617 | utask->state = UTASK_SSTEP_ACK; |
| 1618 | set_thread_flag(TIF_UPROBE); |
| 1619 | return 1; |
| 1620 | } |
| 1621 | |
| 1622 | static struct notifier_block uprobe_exception_nb = { |
| 1623 | .notifier_call = arch_uprobe_exception_notify, |
| 1624 | .priority = INT_MAX-1, /* notified after kprobes, kgdb */ |
| 1625 | }; |
| 1626 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1627 | static int __init init_uprobes(void) |
| 1628 | { |
| 1629 | int i; |
| 1630 | |
| 1631 | for (i = 0; i < UPROBES_HASH_SZ; i++) { |
| 1632 | mutex_init(&uprobes_mutex[i]); |
| 1633 | mutex_init(&uprobes_mmap_mutex[i]); |
| 1634 | } |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1635 | |
| 1636 | return register_die_notifier(&uprobe_exception_nb); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1637 | } |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1638 | module_init(init_uprobes); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1639 | |
| 1640 | static void __exit exit_uprobes(void) |
| 1641 | { |
| 1642 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1643 | module_exit(exit_uprobes); |