// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time. A fine-grained per-inode count would probably be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
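
/*
 * For illustration, the hash is keyed by inode, so mmap work on all vmas
 * of one file serializes on a single bucket, roughly:
 *
 *	mutex_lock(uprobes_mmap_hash(inode));
 *	... install/remove breakpoints for this inode's vmas ...
 *	mutex_unlock(uprobes_mmap_hash(inode));
 */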

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};
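
/*
 * As a concrete illustration of that contract (a simplified, x86-flavoured
 * sketch, not a definition from this file), the architecture supplies the
 * two members itself:
 *
 *	struct arch_uprobe {
 *		union {
 *			u8	insn[MAX_UINSN_BYTES];
 *			u8	ixol[MAX_UINSN_BYTES];
 *		};
 *		...
 *	};
 *
 * x86 can overlay the two copies in one union; other architectures may
 * keep insn and ixol as distinct members.
 */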

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, a thread contends for a slot and frees the
 * slot after single-stepping. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};
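
/*
 * A minimal sketch of how a slot is claimed and released against this
 * structure (the real code lives in xol_take_insn_slot() and
 * xol_free_insn_slot()):
 *
 *	slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
 *	if (slot_nr < UINSNS_PER_PAGE &&
 *	    !test_and_set_bit(slot_nr, area->bitmap)) {
 *		atomic_inc(&area->slot_count);
 *		xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
 *	} else {
 *		wait_event(area->wq, ...);	// all slots busy, retry
 *	}
 *
 *	// after single-stepping:
 *	clear_bit(slot_nr, area->bitmap);
 *	atomic_dec(&area->slot_count);
 *	if (waitqueue_active(&area->wq))
 *		wake_up(&area->wq);
 */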

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax the restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in a
 *	  file-backed executable vma.
 *
 * The single compare below requires VM_MAYEXEC to be set while rejecting
 * vmas with VM_HUGETLB or VM_MAYSHARE (and VM_WRITE when registering).
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
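
/*
 * Worked example of the two conversions above, with assumed numbers:
 * for a mapping with vm_start = 0x7f0000001000 and vm_pgoff = 2 (i.e.
 * the vma starts at file offset 0x2000), a probe at file offset 0x2345
 * lands at
 *
 *	vaddr = 0x7f0000001000 + 0x2345 - (2 << PAGE_SHIFT)
 *	      = 0x7f0000001345		(assuming 4K pages)
 *
 * and vaddr_to_offset() is the exact inverse.
 */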

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page() in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address @old_page is mapped at
 * @old_page: the page we are replacing with @new_page
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = compound_head(old_page),
		.vma = vma,
		.address = addr,
	};
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		get_page(new_page);
		page_add_new_anon_rmap(new_page, vma, addr, false);
		lru_cache_add_inactive_or_unevictable(new_page, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant', which could
	 * be a conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; a uprobe always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *tmp;

	for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
			FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0; in that case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}
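
/*
 * The counter being updated here is typically an SDT/USDT "semaphore" in
 * the probed binary: a variable the traced program tests so it can skip
 * expensive argument setup while nothing is attached. A hedged sketch of
 * the userspace side (names are illustrative):
 *
 *	unsigned short my_probe_semaphore;	// lives at ref_ctr_offset
 *
 *	if (my_probe_semaphore)			// non-zero only while probed
 *		expensive_compute_args(&a, &b);
 *	MY_TRACEPOINT(a, b);			// the probed instruction site
 *
 * update_ref_ctr(uprobe, mm, 1) on register and (..., -1) on unregister
 * keep this counter in sync for every mm that maps it.
 */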

/*
 * NOTE:
 * The breakpoint instruction is expected to be the smallest-size
 * instruction for the architecture. If an architecture has variable-length
 * instructions and the breakpoint instruction is not of the smallest
 * supported length, then is_trap_at_addr() and uprobe_write_opcode() need
 * to be modified accordingly. This is never a problem for architectures
 * with fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace the instruction; update ref_ctr first. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go of new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert the reference counter if the instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try to collapse the PMD for a compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If the application munmap()s the exec vma before
		 * uprobe_unregister() gets called, we don't get a chance to
		 * remove the uprobe from delayed_uprobe_list in
		 * remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock:
 * If a matching uprobe already exists in the rbtree, increment its
 * refcount (access ref) and return the matching uprobe.
 *
 * If no matching uprobe exists, insert the uprobe into the rbtree,
 * take a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
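
/*
 * For context, a consumer is what uprobe_register() hangs off a uprobe.
 * A minimal sketch of a (hypothetical) caller:
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;	// or UPROBE_HANDLER_REMOVE to drop the probe
 *	}
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler = my_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 */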

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}
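
/*
 * Worked example for the loop above, with assumed numbers: with 4K pages,
 * a 16-byte instruction buffer and uprobe->offset = 0x1ffc, the first
 * iteration copies len = 4 bytes (0x2000 - 0x1ffc) from the first page
 * and the second copies the remaining 12 bytes from the next page, so
 * instructions that straddle a page boundary are still copied whole.
 */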

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
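
/*
 * The smp_wmb() above pairs with an smp_rmb() in handle_swbp(); the
 * ordering contract, in sketch form:
 *
 *	writer (prepare_uprobe)		reader (handle_swbp)
 *	-----------------------		--------------------
 *	fill uprobe->arch.insn/ixol	test_bit(UPROBE_COPY_INSN, ...)
 *	smp_wmb()			smp_rmb()
 *	set_bit(UPROBE_COPY_INSN, ...)	use uprobe->arch.insn
 *
 * A task that observes UPROBE_COPY_INSN set is thus guaranteed to also
 * observe the fully copied and analyzed instruction.
 */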

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}
| 949 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 950 | struct map_info { |
| 951 | struct map_info *next; |
| 952 | struct mm_struct *mm; |
Oleg Nesterov | 816c03f | 2012-06-15 17:43:55 +0200 | [diff] [blame] | 953 | unsigned long vaddr; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 954 | }; |
| 955 | |
| 956 | static inline struct map_info *free_map_info(struct map_info *info) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 957 | { |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 958 | struct map_info *next = info->next; |
| 959 | kfree(info); |
| 960 | return next; |
| 961 | } |
| 962 | |
| 963 | static struct map_info * |
| 964 | build_map_info(struct address_space *mapping, loff_t offset, bool is_register) |
| 965 | { |
| 966 | unsigned long pgoff = offset >> PAGE_SHIFT; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 967 | struct vm_area_struct *vma; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 968 | struct map_info *curr = NULL; |
| 969 | struct map_info *prev = NULL; |
| 970 | struct map_info *info; |
| 971 | int more = 0; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 972 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 973 | again: |
Davidlohr Bueso | 4a23717a | 2014-12-12 16:54:30 -0800 | [diff] [blame] | 974 | i_mmap_lock_read(mapping); |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 975 | vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 976 | if (!valid_vma(vma, is_register)) |
| 977 | continue; |
| 978 | |
Oleg Nesterov | 7a5bfb6 | 2012-06-15 17:43:36 +0200 | [diff] [blame] | 979 | if (!prev && !more) { |
| 980 | /* |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 981 | * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through |
Oleg Nesterov | 7a5bfb6 | 2012-06-15 17:43:36 +0200 | [diff] [blame] | 982 |  * reclaim. This is optimistic; no harm is done if it fails. |
| 983 | */ |
| 984 | prev = kmalloc(sizeof(struct map_info), |
| 985 | GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); |
| 986 | if (prev) |
| 987 | prev->next = NULL; |
| 988 | } |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 989 | if (!prev) { |
| 990 | more++; |
| 991 | continue; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 992 | } |
| 993 | |
Vegard Nossum | 388f793 | 2017-02-27 14:30:13 -0800 | [diff] [blame] | 994 | if (!mmget_not_zero(vma->vm_mm)) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 995 | continue; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 996 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 997 | info = prev; |
| 998 | prev = prev->next; |
| 999 | info->next = curr; |
| 1000 | curr = info; |
| 1001 | |
| 1002 | info->mm = vma->vm_mm; |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1003 | info->vaddr = offset_to_vaddr(vma, offset); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1004 | } |
Davidlohr Bueso | 4a23717a | 2014-12-12 16:54:30 -0800 | [diff] [blame] | 1005 | i_mmap_unlock_read(mapping); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1006 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1007 | if (!more) |
| 1008 | goto out; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1009 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1010 | prev = curr; |
| 1011 | while (curr) { |
| 1012 | mmput(curr->mm); |
| 1013 | curr = curr->next; |
| 1014 | } |
| 1015 | |
| 1016 | do { |
| 1017 | info = kmalloc(sizeof(struct map_info), GFP_KERNEL); |
| 1018 | if (!info) { |
| 1019 | curr = ERR_PTR(-ENOMEM); |
| 1020 | goto out; |
| 1021 | } |
| 1022 | info->next = prev; |
| 1023 | prev = info; |
| 1024 | } while (--more); |
| 1025 | |
| 1026 | goto again; |
| 1027 | out: |
| 1028 | while (prev) |
| 1029 | prev = free_map_info(prev); |
| 1030 | return curr; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1031 | } |
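
/*
 * The allocation dance in build_map_info(), in miniature (a sketch of the
 * pattern, not kernel API): we must not sleep under i_mmap_lock_read(),
 * so allocate opportunistically with GFP_NOWAIT while walking, count the
 * vmas we had no node for in "more", then allocate the shortfall with
 * GFP_KERNEL outside the lock and walk again:
 *
 *	again:
 *		lock();
 *		for_each_item(item) {
 *			node = pop(&prealloc) ?: alloc(GFP_NOWAIT);
 *			if (!node) { more++; continue; }
 *			record(node, item);
 *		}
 *		unlock();
 *		if (more) {
 *			while (more--)
 *				push(&prealloc, alloc(GFP_KERNEL));
 *			goto again;
 *		}
 *
 * (Simplified: the real code above also drops the mm references taken on
 * the failed pass, reuses its nodes as the preallocation list, and bails
 * out with -ENOMEM if a GFP_KERNEL allocation fails.)
 */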
| 1032 | |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1033 | static int |
| 1034 | register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1035 | { |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1036 | bool is_register = !!new; |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1037 | struct map_info *info; |
| 1038 | int err = 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1039 | |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1040 | percpu_down_write(&dup_mmap_sem); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1041 | info = build_map_info(uprobe->inode->i_mapping, |
| 1042 | uprobe->offset, is_register); |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1043 | if (IS_ERR(info)) { |
| 1044 | err = PTR_ERR(info); |
| 1045 | goto out; |
| 1046 | } |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1047 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1048 | while (info) { |
| 1049 | struct mm_struct *mm = info->mm; |
| 1050 | struct vm_area_struct *vma; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1051 | |
Oleg Nesterov | 076a365 | 2012-09-30 18:54:53 +0200 | [diff] [blame] | 1052 | if (err && is_register) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1053 | goto free; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1054 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1055 | mmap_write_lock(mm); |
Oleg Nesterov | f4d6dfe | 2012-07-29 20:22:44 +0200 | [diff] [blame] | 1056 | vma = find_vma(mm, info->vaddr); |
| 1057 | if (!vma || !valid_vma(vma, is_register) || |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1058 | file_inode(vma->vm_file) != uprobe->inode) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1059 | goto unlock; |
| 1060 | |
Oleg Nesterov | f4d6dfe | 2012-07-29 20:22:44 +0200 | [diff] [blame] | 1061 | if (vma->vm_start > info->vaddr || |
| 1062 | vaddr_to_offset(vma, info->vaddr) != uprobe->offset) |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1063 | goto unlock; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1064 | |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1065 | if (is_register) { |
| 1066 |  			/* consult only the "caller", i.e. the new consumer. */ |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1067 | if (consumer_filter(new, |
Oleg Nesterov | 8a7f2fa | 2012-12-28 17:58:38 +0100 | [diff] [blame] | 1068 | UPROBE_FILTER_REGISTER, mm)) |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1069 | err = install_breakpoint(uprobe, mm, vma, info->vaddr); |
| 1070 | } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) { |
Oleg Nesterov | 8a7f2fa | 2012-12-28 17:58:38 +0100 | [diff] [blame] | 1071 | if (!filter_chain(uprobe, |
| 1072 | UPROBE_FILTER_UNREGISTER, mm)) |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1073 | err |= remove_breakpoint(uprobe, mm, info->vaddr); |
| 1074 | } |
Oleg Nesterov | 78f7411 | 2012-08-08 17:35:08 +0200 | [diff] [blame] | 1075 | |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1076 | unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1077 | mmap_write_unlock(mm); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1078 | free: |
| 1079 | mmput(mm); |
| 1080 | info = free_map_info(info); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1081 | } |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1082 | out: |
| 1083 | percpu_up_write(&dup_mmap_sem); |
Oleg Nesterov | 2687209 | 2012-06-15 17:43:33 +0200 | [diff] [blame] | 1084 | return err; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1085 | } |
| 1086 | |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1087 | static void |
| 1088 | __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1089 | { |
Oleg Nesterov | 04aab9b | 2012-11-23 19:43:50 +0100 | [diff] [blame] | 1090 | int err; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1091 | |
Oleg Nesterov | 06d0713 | 2014-06-27 19:01:40 +0200 | [diff] [blame] | 1092 | if (WARN_ON(!consumer_del(uprobe, uc))) |
Oleg Nesterov | 04aab9b | 2012-11-23 19:43:50 +0100 | [diff] [blame] | 1093 | return; |
| 1094 | |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1095 | err = register_for_each_vma(uprobe, NULL); |
Oleg Nesterov | bb92928 | 2012-11-24 18:27:08 +0100 | [diff] [blame] | 1096 |  	/* TODO: can't unregister? schedule a worker thread */ |
| 1097 | if (!uprobe->consumers && !err) |
| 1098 | delete_uprobe(uprobe); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1099 | } |
| 1100 | |
| 1101 | /* |
Linus Torvalds | 7140ad3 | 2018-08-20 18:32:00 -0700 | [diff] [blame] | 1102 | * uprobe_unregister - unregister an already registered probe. |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1103 |  * @inode: the file from which the probe has to be removed. |
| 1104 | * @offset: offset from the start of the file. |
| 1105 |  * @uc: identifies which consumer to remove when multiple probes are colocated. |
| 1106 | */ |
| 1107 | void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) |
| 1108 | { |
| 1109 | struct uprobe *uprobe; |
| 1110 | |
| 1111 | uprobe = find_uprobe(inode, offset); |
| 1112 | if (WARN_ON(!uprobe)) |
| 1113 | return; |
| 1114 | |
| 1115 | down_write(&uprobe->register_rwsem); |
| 1116 | __uprobe_unregister(uprobe, uc); |
| 1117 | up_write(&uprobe->register_rwsem); |
| 1118 | put_uprobe(uprobe); |
| 1119 | } |
| 1120 | EXPORT_SYMBOL_GPL(uprobe_unregister); |
| 1121 | |
| 1122 | /* |
| 1123 | * __uprobe_register - register a probe |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1124 | * @inode: the file in which the probe has to be placed. |
| 1125 | * @offset: offset from the start of the file. |
Srikar Dronamraju | e3343e6 | 2012-03-12 14:55:30 +0530 | [diff] [blame] | 1126 |  * @uc: information on how to handle the probe. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1127 | * |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1128 | * Apart from the access refcount, __uprobe_register() takes a creation |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1129 |  * refcount (through alloc_uprobe) if and only if this @uprobe is getting |
| 1130 |  * inserted into the rbtree (i.e. the first consumer for an @inode:@offset |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1131 |  * tuple). The creation refcount stops uprobe_unregister() from freeing the |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1132 |  * @uprobe even before the register operation is complete. The creation |
Srikar Dronamraju | e3343e6 | 2012-03-12 14:55:30 +0530 | [diff] [blame] | 1133 |  * refcount is released when the last @uc for the @uprobe |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1134 |  * unregisters. The caller of __uprobe_register() is required to keep @inode |
Song Liu | 61f9420 | 2018-04-23 10:21:35 -0700 | [diff] [blame] | 1135 | * (and the containing mount) referenced. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1136 | * |
| 1137 |  * Return 0 on success, or a negative errno if the probes could not |
| 1138 |  * be successfully installed. |
| 1139 | */ |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1140 | static int __uprobe_register(struct inode *inode, loff_t offset, |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1141 | loff_t ref_ctr_offset, struct uprobe_consumer *uc) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1142 | { |
| 1143 | struct uprobe *uprobe; |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1144 | int ret; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1145 | |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 1146 |  	/* The consumer must set at least one handler (handler or ret_handler) */ |
| 1147 | if (!uc->handler && !uc->ret_handler) |
| 1148 | return -EINVAL; |
| 1149 | |
Oleg Nesterov | 40814f6 | 2014-05-19 20:41:36 +0200 | [diff] [blame] | 1150 | /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ |
| 1151 | if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping)) |
Oleg Nesterov | 41ccba0 | 2014-05-19 20:40:54 +0200 | [diff] [blame] | 1152 | return -EIO; |
Oleg Nesterov | f0744af | 2012-11-21 18:01:43 +0100 | [diff] [blame] | 1153 | /* Racy, just to catch the obvious mistakes */ |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1154 | if (offset > i_size_read(inode)) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1155 | return -EINVAL; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1156 | |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 1157 | /* |
| 1158 | * This ensures that copy_from_page(), copy_to_page() and |
| 1159 | * __update_ref_ctr() can't cross page boundary. |
| 1160 | */ |
| 1161 | if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) |
| 1162 | return -EINVAL; |
| 1163 | if (!IS_ALIGNED(ref_ctr_offset, sizeof(short))) |
| 1164 | return -EINVAL; |
| 1165 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1166 | retry: |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1167 | uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1168 | if (!uprobe) |
| 1169 | return -ENOMEM; |
Ravi Bangoria | 22bad38 | 2018-08-20 10:12:48 +0530 | [diff] [blame] | 1170 | if (IS_ERR(uprobe)) |
| 1171 | return PTR_ERR(uprobe); |
| 1172 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1173 | /* |
| 1174 | * We can race with uprobe_unregister()->delete_uprobe(). |
| 1175 | * Check uprobe_is_active() and retry if it is false. |
| 1176 | */ |
| 1177 | down_write(&uprobe->register_rwsem); |
| 1178 | ret = -EAGAIN; |
| 1179 | if (likely(uprobe_is_active(uprobe))) { |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1180 | consumer_add(uprobe, uc); |
| 1181 | ret = register_for_each_vma(uprobe, uc); |
Oleg Nesterov | 9a98e03 | 2012-11-23 20:15:17 +0100 | [diff] [blame] | 1182 | if (ret) |
Oleg Nesterov | 04aab9b | 2012-11-23 19:43:50 +0100 | [diff] [blame] | 1183 | __uprobe_unregister(uprobe, uc); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1184 | } |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1185 | up_write(&uprobe->register_rwsem); |
| 1186 | put_uprobe(uprobe); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1187 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 1188 | if (unlikely(ret == -EAGAIN)) |
| 1189 | goto retry; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1190 | return ret; |
| 1191 | } |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1192 | |
| 1193 | int uprobe_register(struct inode *inode, loff_t offset, |
| 1194 | struct uprobe_consumer *uc) |
| 1195 | { |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1196 | return __uprobe_register(inode, offset, 0, uc); |
Ravi Bangoria | 38e967a | 2018-08-09 09:48:51 +0530 | [diff] [blame] | 1197 | } |
Josh Stone | e8440c1 | 2013-01-13 19:03:34 +0100 | [diff] [blame] | 1198 | EXPORT_SYMBOL_GPL(uprobe_register); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1199 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1200 | int uprobe_register_refctr(struct inode *inode, loff_t offset, |
| 1201 | loff_t ref_ctr_offset, struct uprobe_consumer *uc) |
| 1202 | { |
| 1203 | return __uprobe_register(inode, offset, ref_ctr_offset, uc); |
| 1204 | } |
| 1205 | EXPORT_SYMBOL_GPL(uprobe_register_refctr); |
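
/*
 * Minimal usage sketch (hypothetical caller such as a tracing backend;
 * the "example_*" names are not part of this file). @inode and @offset
 * locate the probed instruction in the file itself, not in any one
 * mapping:
 *
 *	static int example_handler(struct uprobe_consumer *self,
 *				   struct pt_regs *regs)
 *	{
 *		pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer example_uc = {
 *		.handler = example_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &example_uc);
 *	...
 *	uprobe_unregister(inode, offset, &example_uc);
 *
 * As documented above, the caller must keep @inode (and its mount)
 * referenced for the whole registration lifetime.
 */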
| 1206 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1207 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1208 |  * uprobe_apply - add or remove the breakpoints of an already registered probe. |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1209 |  * @inode: the file in which the probe resides. |
| 1210 |  * @offset: offset from the start of the file. |
| 1211 |  * @uc: consumer which wants to add more or remove some breakpoints |
| 1212 |  * @add: true to add the breakpoints, false to remove them |
| 1213 | */ |
| 1214 | int uprobe_apply(struct inode *inode, loff_t offset, |
| 1215 | struct uprobe_consumer *uc, bool add) |
| 1216 | { |
| 1217 | struct uprobe *uprobe; |
| 1218 | struct uprobe_consumer *con; |
| 1219 | int ret = -ENOENT; |
| 1220 | |
| 1221 | uprobe = find_uprobe(inode, offset); |
Oleg Nesterov | 06d0713 | 2014-06-27 19:01:40 +0200 | [diff] [blame] | 1222 | if (WARN_ON(!uprobe)) |
Oleg Nesterov | bdf8647 | 2013-02-03 19:21:12 +0100 | [diff] [blame] | 1223 | return ret; |
| 1224 | |
| 1225 | down_write(&uprobe->register_rwsem); |
| 1226 | for (con = uprobe->consumers; con && con != uc ; con = con->next) |
| 1227 | ; |
| 1228 | if (con) |
| 1229 | ret = register_for_each_vma(uprobe, add ? uc : NULL); |
| 1230 | up_write(&uprobe->register_rwsem); |
| 1231 | put_uprobe(uprobe); |
| 1232 | |
| 1233 | return ret; |
| 1234 | } |
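
/*
 * For instance (illustrative), a backend that keeps the consumer
 * registered but wants to toggle it, much as the perf glue does around
 * event enable/disable, could use:
 *
 *	uprobe_apply(inode, offset, &example_uc, false);	// unapply
 *	...
 *	uprobe_apply(inode, offset, &example_uc, true);		// re-apply
 *
 * Each call re-runs register_for_each_vma(), so together with a ->filter
 * callback the installed breakpoints track the consumer's current
 * filtering decisions.
 */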
| 1235 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1236 | static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) |
| 1237 | { |
| 1238 | struct vm_area_struct *vma; |
| 1239 | int err = 0; |
| 1240 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1241 | mmap_read_lock(mm); |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1242 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 1243 | unsigned long vaddr; |
| 1244 | loff_t offset; |
| 1245 | |
| 1246 | if (!valid_vma(vma, false) || |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1247 | file_inode(vma->vm_file) != uprobe->inode) |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1248 | continue; |
| 1249 | |
| 1250 | offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; |
| 1251 | if (uprobe->offset < offset || |
| 1252 | uprobe->offset >= offset + vma->vm_end - vma->vm_start) |
| 1253 | continue; |
| 1254 | |
| 1255 | vaddr = offset_to_vaddr(vma, uprobe->offset); |
| 1256 | err |= remove_breakpoint(uprobe, mm, vaddr); |
| 1257 | } |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1258 | mmap_read_unlock(mm); |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 1259 | |
| 1260 | return err; |
| 1261 | } |
| 1262 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1263 | static struct rb_node * |
| 1264 | find_node_in_range(struct inode *inode, loff_t min, loff_t max) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1265 | { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1266 | struct rb_node *n = uprobes_tree.rb_node; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1267 | |
| 1268 | while (n) { |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1269 | struct uprobe *u = rb_entry(n, struct uprobe, rb_node); |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1270 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1271 | if (inode < u->inode) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1272 | n = n->rb_left; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1273 | } else if (inode > u->inode) { |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1274 | n = n->rb_right; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1275 | } else { |
| 1276 | if (max < u->offset) |
| 1277 | n = n->rb_left; |
| 1278 | else if (min > u->offset) |
| 1279 | n = n->rb_right; |
| 1280 | else |
| 1281 | break; |
| 1282 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1283 | } |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1284 | |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1285 | return n; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1286 | } |
| 1287 | |
| 1288 | /* |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1289 | * For a given range in vma, build a list of probes that need to be inserted. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1290 | */ |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1291 | static void build_probe_list(struct inode *inode, |
| 1292 | struct vm_area_struct *vma, |
| 1293 | unsigned long start, unsigned long end, |
| 1294 | struct list_head *head) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1295 | { |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1296 | loff_t min, max; |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1297 | struct rb_node *n, *t; |
| 1298 | struct uprobe *u; |
| 1299 | |
| 1300 | INIT_LIST_HEAD(head); |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 1301 | min = vaddr_to_offset(vma, start); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1302 | max = min + (end - start) - 1; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1303 | |
Oleg Nesterov | 6f47caa | 2012-08-18 17:01:57 +0200 | [diff] [blame] | 1304 | spin_lock(&uprobes_treelock); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1305 | n = find_node_in_range(inode, min, max); |
| 1306 | if (n) { |
| 1307 | for (t = n; t; t = rb_prev(t)) { |
| 1308 | u = rb_entry(t, struct uprobe, rb_node); |
| 1309 | if (u->inode != inode || u->offset < min) |
| 1310 | break; |
| 1311 | list_add(&u->pending_list, head); |
Oleg Nesterov | f231722 | 2015-07-21 15:40:03 +0200 | [diff] [blame] | 1312 | get_uprobe(u); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1313 | } |
| 1314 | for (t = n; (t = rb_next(t)); ) { |
| 1315 | u = rb_entry(t, struct uprobe, rb_node); |
| 1316 | if (u->inode != inode || u->offset > max) |
| 1317 | break; |
| 1318 | list_add(&u->pending_list, head); |
Oleg Nesterov | f231722 | 2015-07-21 15:40:03 +0200 | [diff] [blame] | 1319 | get_uprobe(u); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1320 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1321 | } |
Oleg Nesterov | 6f47caa | 2012-08-18 17:01:57 +0200 | [diff] [blame] | 1322 | spin_unlock(&uprobes_treelock); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1323 | } |
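
/*
 * Worked example (illustrative numbers, assuming 4K pages): a vma with
 * vm_start == 0x400000 and vm_pgoff == 0x10 maps file offset 0x10000 at
 * 0x400000. For the range start == 0x401000, end == 0x403000:
 *
 *	min = vaddr_to_offset(vma, 0x401000)
 *	    = (0x10 << 12) + (0x401000 - 0x400000) = 0x11000
 *	max = min + (end - start) - 1 = 0x12fff
 *
 * so the rbtree walk above collects every uprobe on this inode whose
 * offset lies in [0x11000, 0x12fff].
 */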
| 1324 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1325 | /* @vma maps the reference counter, not the probed instruction. */ |
| 1326 | static int delayed_ref_ctr_inc(struct vm_area_struct *vma) |
| 1327 | { |
| 1328 | struct list_head *pos, *q; |
| 1329 | struct delayed_uprobe *du; |
| 1330 | unsigned long vaddr; |
| 1331 | int ret = 0, err = 0; |
| 1332 | |
| 1333 | mutex_lock(&delayed_uprobe_lock); |
| 1334 | list_for_each_safe(pos, q, &delayed_uprobe_list) { |
| 1335 | du = list_entry(pos, struct delayed_uprobe, list); |
| 1336 | |
| 1337 | if (du->mm != vma->vm_mm || |
| 1338 | !valid_ref_ctr_vma(du->uprobe, vma)) |
| 1339 | continue; |
| 1340 | |
| 1341 | vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); |
| 1342 | ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); |
| 1343 | if (ret) { |
| 1344 | update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); |
| 1345 | if (!err) |
| 1346 | err = ret; |
| 1347 | } |
| 1348 | delayed_uprobe_delete(du); |
| 1349 | } |
| 1350 | mutex_unlock(&delayed_uprobe_lock); |
| 1351 | return err; |
| 1352 | } |
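
/*
 * The reference counter is typically an SDT-style semaphore inside the
 * probed binary: a 16-bit counter that __update_ref_ctr() increments
 * while a probe is armed, so user space can skip expensive argument
 * setup when no one is tracing. Sketch of the user-space side
 * (illustrative, not kernel code):
 *
 *	unsigned short example_probe_semaphore
 *		__attribute__((section(".probes")));
 *
 *	if (example_probe_semaphore)	// non-zero while a probe is armed
 *		prepare_and_emit_args();
 */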
| 1353 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1354 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1355 | * Called from mmap_region/vma_adjust with mm->mmap_lock acquired. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1356 | * |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1357 |  * Currently we ignore all errors and always return 0; the callers |
| 1358 | * can't handle the failure anyway. |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1359 | */ |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1360 | int uprobe_mmap(struct vm_area_struct *vma) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1361 | { |
| 1362 | struct list_head tmp_list; |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1363 | struct uprobe *uprobe, *u; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1364 | struct inode *inode; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1365 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1366 | if (no_uprobe_events()) |
| 1367 | return 0; |
| 1368 | |
| 1369 | if (vma->vm_file && |
| 1370 | (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && |
| 1371 | test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) |
| 1372 | delayed_ref_ctr_inc(vma); |
| 1373 | |
| 1374 | if (!valid_vma(vma, true)) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1375 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1376 | |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1377 | inode = file_inode(vma->vm_file); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1378 | if (!inode) |
Ingo Molnar | 7b2d81d | 2012-02-17 09:27:41 +0100 | [diff] [blame] | 1379 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1380 | |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1381 | mutex_lock(uprobes_mmap_hash(inode)); |
Oleg Nesterov | 891c397 | 2012-07-29 20:22:40 +0200 | [diff] [blame] | 1382 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1383 | /* |
| 1384 |  	 * We can race with uprobe_unregister(); this uprobe may already have |
| 1385 |  	 * been removed. But in that case filter_chain() must return false, as |
| 1386 |  	 * all consumers have gone away. |
| 1387 | */ |
Oleg Nesterov | 665605a | 2012-07-29 20:22:29 +0200 | [diff] [blame] | 1388 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
Oleg Nesterov | 806a98b | 2012-12-27 18:21:11 +0100 | [diff] [blame] | 1389 | if (!fatal_signal_pending(current) && |
Oleg Nesterov | 8a7f2fa | 2012-12-28 17:58:38 +0100 | [diff] [blame] | 1390 | filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { |
Oleg Nesterov | 57683f7 | 2012-07-29 20:22:47 +0200 | [diff] [blame] | 1391 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1392 | install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1393 | } |
| 1394 | put_uprobe(uprobe); |
| 1395 | } |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1396 | mutex_unlock(uprobes_mmap_hash(inode)); |
| 1397 | |
Oleg Nesterov | 5e5be71 | 2012-08-06 14:49:56 +0200 | [diff] [blame] | 1398 | return 0; |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 1399 | } |
| 1400 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1401 | static bool |
| 1402 | vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
| 1403 | { |
| 1404 | loff_t min, max; |
| 1405 | struct inode *inode; |
| 1406 | struct rb_node *n; |
| 1407 | |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 1408 | inode = file_inode(vma->vm_file); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1409 | |
| 1410 | min = vaddr_to_offset(vma, start); |
| 1411 | max = min + (end - start) - 1; |
| 1412 | |
| 1413 | spin_lock(&uprobes_treelock); |
| 1414 | n = find_node_in_range(inode, min, max); |
| 1415 | spin_unlock(&uprobes_treelock); |
| 1416 | |
| 1417 | return !!n; |
| 1418 | } |
| 1419 | |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1420 | /* |
| 1421 | * Called in context of a munmap of a vma. |
| 1422 | */ |
Srikar Dronamraju | cbc91f7 | 2012-04-11 16:05:27 +0530 | [diff] [blame] | 1423 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1424 | { |
Oleg Nesterov | 441f1eb7 | 2012-11-25 19:54:29 +0100 | [diff] [blame] | 1425 | if (no_uprobe_events() || !valid_vma(vma, false)) |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1426 | return; |
| 1427 | |
Oleg Nesterov | 2fd611a | 2012-07-29 20:22:31 +0200 | [diff] [blame] | 1428 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ |
| 1429 | return; |
| 1430 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1431 | if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || |
| 1432 | test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1433 | return; |
| 1434 | |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1435 | if (vma_has_uprobes(vma, start, end)) |
| 1436 | set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); |
Srikar Dronamraju | 682968e | 2012-03-30 23:56:46 +0530 | [diff] [blame] | 1437 | } |
| 1438 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1439 | /* Slot allocation for XOL */ |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1440 | static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1441 | { |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1442 | struct vm_area_struct *vma; |
| 1443 | int ret; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1444 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1445 | if (mmap_write_lock_killable(mm)) |
Michal Hocko | 598fdc1 | 2016-05-23 16:26:08 -0700 | [diff] [blame] | 1446 | return -EINTR; |
| 1447 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1448 | if (mm->uprobes_state.xol_area) { |
| 1449 | ret = -EALREADY; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1450 | goto fail; |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1451 | } |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1452 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1453 | if (!area->vaddr) { |
| 1454 |  		/* Try to map as high as possible; this is only a hint. */ |
| 1455 | area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, |
| 1456 | PAGE_SIZE, 0, 0); |
Gaowei Pu | ff68dac | 2019-11-30 17:51:03 -0800 | [diff] [blame] | 1457 | if (IS_ERR_VALUE(area->vaddr)) { |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1458 | ret = area->vaddr; |
| 1459 | goto fail; |
| 1460 | } |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1461 | } |
| 1462 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1463 | vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, |
| 1464 | VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, |
| 1465 | &area->xol_mapping); |
| 1466 | if (IS_ERR(vma)) { |
| 1467 | ret = PTR_ERR(vma); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1468 | goto fail; |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1469 | } |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1470 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1471 | ret = 0; |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1472 | /* pairs with get_xol_area() */ |
| 1473 | smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */ |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1474 | fail: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1475 | mmap_write_unlock(mm); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1476 | |
| 1477 | return ret; |
| 1478 | } |
| 1479 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1480 | static struct xol_area *__create_xol_area(unsigned long vaddr) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1481 | { |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1482 | struct mm_struct *mm = current->mm; |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1483 | uprobe_opcode_t insn = UPROBE_SWBP_INSN; |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1484 | struct xol_area *area; |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1485 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1486 | area = kmalloc(sizeof(*area), GFP_KERNEL); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1487 | if (unlikely(!area)) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1488 | goto out; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1489 | |
Kees Cook | 6396bb2 | 2018-06-12 14:03:40 -0700 | [diff] [blame] | 1490 | area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long), |
| 1491 | GFP_KERNEL); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1492 | if (!area->bitmap) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1493 | goto free_area; |
| 1494 | |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1495 | area->xol_mapping.name = "[uprobes]"; |
Oleg Nesterov | 869ae76 | 2016-02-27 23:11:28 +0100 | [diff] [blame] | 1496 | area->xol_mapping.fault = NULL; |
Oleg Nesterov | 704bde3 | 2015-07-21 15:40:33 +0200 | [diff] [blame] | 1497 | area->xol_mapping.pages = area->pages; |
Oleg Nesterov | f58bea2 | 2015-07-21 15:40:31 +0200 | [diff] [blame] | 1498 | area->pages[0] = alloc_page(GFP_HIGHUSER); |
| 1499 | if (!area->pages[0]) |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1500 | goto free_bitmap; |
Oleg Nesterov | f58bea2 | 2015-07-21 15:40:31 +0200 | [diff] [blame] | 1501 | area->pages[1] = NULL; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1502 | |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1503 | area->vaddr = vaddr; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1504 | init_waitqueue_head(&area->wq); |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1505 | /* Reserve the 1st slot for get_trampoline_vaddr() */ |
| 1506 | set_bit(0, area->bitmap); |
| 1507 | atomic_set(&area->slot_count, 1); |
Marcin Nowakowski | 297e765 | 2016-12-13 11:40:57 +0100 | [diff] [blame] | 1508 | arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1509 | |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1510 | if (!xol_add_vma(mm, area)) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1511 | return area; |
| 1512 | |
Oleg Nesterov | f58bea2 | 2015-07-21 15:40:31 +0200 | [diff] [blame] | 1513 | __free_page(area->pages[0]); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1514 | free_bitmap: |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1515 | kfree(area->bitmap); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1516 | free_area: |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1517 | kfree(area); |
Oleg Nesterov | c8a8253 | 2012-12-30 17:40:39 +0100 | [diff] [blame] | 1518 | out: |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1519 | return NULL; |
| 1520 | } |
| 1521 | |
| 1522 | /* |
| 1523 | * get_xol_area - Allocate process's xol_area if necessary. |
| 1524 | * This area will be used for storing instructions for execution out of line. |
| 1525 | * |
| 1526 | * Returns the allocated area or NULL. |
| 1527 | */ |
| 1528 | static struct xol_area *get_xol_area(void) |
| 1529 | { |
| 1530 | struct mm_struct *mm = current->mm; |
| 1531 | struct xol_area *area; |
| 1532 | |
| 1533 | if (!mm->uprobes_state.xol_area) |
Oleg Nesterov | af0d95a | 2013-10-13 21:18:38 +0200 | [diff] [blame] | 1534 | __create_xol_area(0); |
Oleg Nesterov | 6441ec8 | 2013-10-13 21:18:35 +0200 | [diff] [blame] | 1535 | |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1536 | /* Pairs with xol_add_vma() smp_store_release() */ |
| 1537 | area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */ |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1538 | return area; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1539 | } |
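
/*
 * Publication pattern used here (sketch): xol_add_vma() initializes the
 * area completely, then publishes it with smp_store_release(); readers
 * fetch it with READ_ONCE() and can use it without further locking:
 *
 *	writer:	area->bitmap = ...; area->pages[0] = ...;
 *		smp_store_release(&mm->uprobes_state.xol_area, area);
 *
 *	reader:	area = READ_ONCE(mm->uprobes_state.xol_area);
 *		if (area)
 *			...	// initialization is guaranteed visible
 */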
| 1540 | |
| 1541 | /* |
| 1542 | * uprobe_clear_state - Free the area allocated for slots. |
| 1543 | */ |
| 1544 | void uprobe_clear_state(struct mm_struct *mm) |
| 1545 | { |
| 1546 | struct xol_area *area = mm->uprobes_state.xol_area; |
| 1547 | |
Ravi Bangoria | 1cc3316 | 2018-08-20 10:12:47 +0530 | [diff] [blame] | 1548 | mutex_lock(&delayed_uprobe_lock); |
| 1549 | delayed_uprobe_remove(NULL, mm); |
| 1550 | mutex_unlock(&delayed_uprobe_lock); |
| 1551 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1552 | if (!area) |
| 1553 | return; |
| 1554 | |
Oleg Nesterov | f58bea2 | 2015-07-21 15:40:31 +0200 | [diff] [blame] | 1555 | put_page(area->pages[0]); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1556 | kfree(area->bitmap); |
| 1557 | kfree(area); |
| 1558 | } |
| 1559 | |
Oleg Nesterov | 32cdba1 | 2012-11-14 19:03:42 +0100 | [diff] [blame] | 1560 | void uprobe_start_dup_mmap(void) |
| 1561 | { |
| 1562 | percpu_down_read(&dup_mmap_sem); |
| 1563 | } |
| 1564 | |
| 1565 | void uprobe_end_dup_mmap(void) |
| 1566 | { |
| 1567 | percpu_up_read(&dup_mmap_sem); |
| 1568 | } |
| 1569 | |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1570 | void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) |
| 1571 | { |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1572 | if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1573 | set_bit(MMF_HAS_UPROBES, &newmm->flags); |
Oleg Nesterov | 9f68f672 | 2012-08-19 16:15:09 +0200 | [diff] [blame] | 1574 | /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ |
| 1575 | set_bit(MMF_RECALC_UPROBES, &newmm->flags); |
| 1576 | } |
Oleg Nesterov | f8ac4ec | 2012-08-08 17:11:42 +0200 | [diff] [blame] | 1577 | } |
| 1578 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1579 | /* |
| 1580 |  * Search for a free slot. |
| 1581 | */ |
| 1582 | static unsigned long xol_take_insn_slot(struct xol_area *area) |
| 1583 | { |
| 1584 | unsigned long slot_addr; |
| 1585 | int slot_nr; |
| 1586 | |
| 1587 | do { |
| 1588 | slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); |
| 1589 | if (slot_nr < UINSNS_PER_PAGE) { |
| 1590 | if (!test_and_set_bit(slot_nr, area->bitmap)) |
| 1591 | break; |
| 1592 | |
| 1593 | slot_nr = UINSNS_PER_PAGE; |
| 1594 | continue; |
| 1595 | } |
| 1596 | wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); |
| 1597 | } while (slot_nr >= UINSNS_PER_PAGE); |
| 1598 | |
| 1599 | slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); |
| 1600 | atomic_inc(&area->slot_count); |
| 1601 | |
| 1602 | return slot_addr; |
| 1603 | } |
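
/*
 * Worked example (assuming 4K pages and 128-byte slots, as on x86, so
 * UINSNS_PER_PAGE == 32): slot 0 is reserved for the uretprobe trampoline
 * by __create_xol_area(), so the first probe hit in a process typically
 * takes slot_nr == 1 and gets
 *
 *	slot_addr = area->vaddr + 1 * UPROBE_XOL_SLOT_BYTES
 *		  = area->vaddr + 128
 *
 * When all slots are busy, the caller sleeps on area->wq until
 * xol_free_insn_slot() clears a bit and wakes it.
 */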
| 1604 | |
| 1605 | /* |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1606 | * xol_get_insn_slot - allocate a slot for xol. |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1607 | * Returns the allocated slot address or 0. |
| 1608 | */ |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1609 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1610 | { |
| 1611 | struct xol_area *area; |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1612 | unsigned long xol_vaddr; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1613 | |
Oleg Nesterov | 9b545df | 2012-12-31 16:39:49 +0100 | [diff] [blame] | 1614 | area = get_xol_area(); |
| 1615 | if (!area) |
| 1616 | return 0; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1617 | |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1618 | xol_vaddr = xol_take_insn_slot(area); |
| 1619 | if (unlikely(!xol_vaddr)) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1620 | return 0; |
| 1621 | |
Oleg Nesterov | f58bea2 | 2015-07-21 15:40:31 +0200 | [diff] [blame] | 1622 | arch_uprobe_copy_ixol(area->pages[0], xol_vaddr, |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1623 | &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1624 | |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1625 | return xol_vaddr; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1626 | } |
| 1627 | |
| 1628 | /* |
| 1629 |  * xol_free_insn_slot - If the slot was earlier allocated by |
| 1630 | * @xol_get_insn_slot(), make the slot available for |
| 1631 | * subsequent requests. |
| 1632 | */ |
| 1633 | static void xol_free_insn_slot(struct task_struct *tsk) |
| 1634 | { |
| 1635 | struct xol_area *area; |
| 1636 | unsigned long vma_end; |
| 1637 | unsigned long slot_addr; |
| 1638 | |
| 1639 | if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) |
| 1640 | return; |
| 1641 | |
| 1642 | slot_addr = tsk->utask->xol_vaddr; |
Oleg Nesterov | af4355e | 2012-12-31 18:37:11 +0100 | [diff] [blame] | 1643 | if (unlikely(!slot_addr)) |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1644 | return; |
| 1645 | |
| 1646 | area = tsk->mm->uprobes_state.xol_area; |
| 1647 | vma_end = area->vaddr + PAGE_SIZE; |
| 1648 | if (area->vaddr <= slot_addr && slot_addr < vma_end) { |
| 1649 | unsigned long offset; |
| 1650 | int slot_nr; |
| 1651 | |
| 1652 | offset = slot_addr - area->vaddr; |
| 1653 | slot_nr = offset / UPROBE_XOL_SLOT_BYTES; |
| 1654 | if (slot_nr >= UINSNS_PER_PAGE) |
| 1655 | return; |
| 1656 | |
| 1657 | clear_bit(slot_nr, area->bitmap); |
| 1658 | atomic_dec(&area->slot_count); |
Oleg Nesterov | 2a742ce | 2015-07-21 15:40:36 +0200 | [diff] [blame] | 1659 | smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1660 | if (waitqueue_active(&area->wq)) |
| 1661 | wake_up(&area->wq); |
| 1662 | |
| 1663 | tsk->utask->xol_vaddr = 0; |
| 1664 | } |
| 1665 | } |
| 1666 | |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1667 | void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, |
| 1668 | void *src, unsigned long len) |
| 1669 | { |
| 1670 | /* Initialize the slot */ |
| 1671 | copy_to_page(page, vaddr, src, len); |
| 1672 | |
| 1673 | /* |
Christoph Hellwig | 885f7f8 | 2020-06-07 21:42:22 -0700 | [diff] [blame] | 1674 | * We probably need flush_icache_user_page() but it needs vma. |
Victor Kamensky | 72e6ae2 | 2014-04-29 04:20:52 +0100 | [diff] [blame] | 1675 |  	 * This should work on most architectures by default. If an |
| 1676 |  	 * architecture needs to do something different, it can define |
| 1677 |  	 * its own version of the function. |
| 1678 | */ |
| 1679 | flush_dcache_page(page); |
| 1680 | } |
| 1681 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1682 | /** |
| 1683 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs |
| 1684 | * @regs: Reflects the saved state of the task after it has hit a breakpoint |
| 1685 | * instruction. |
| 1686 | * Return the address of the breakpoint instruction. |
| 1687 | */ |
| 1688 | unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) |
| 1689 | { |
| 1690 | return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; |
| 1691 | } |
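
/*
 * For example, on x86 the breakpoint is the one-byte "int3" instruction
 * (UPROBE_SWBP_INSN_SIZE == 1): if the probed instruction is at 0x401234,
 * the trap leaves regs->ip == 0x401235 and this helper recovers 0x401234.
 */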
| 1692 | |
Oleg Nesterov | b02ef20 | 2014-05-12 18:24:45 +0200 | [diff] [blame] | 1693 | unsigned long uprobe_get_trap_addr(struct pt_regs *regs) |
| 1694 | { |
| 1695 | struct uprobe_task *utask = current->utask; |
| 1696 | |
| 1697 | if (unlikely(utask && utask->active_uprobe)) |
| 1698 | return utask->vaddr; |
| 1699 | |
| 1700 | return instruction_pointer(regs); |
| 1701 | } |
| 1702 | |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1703 | static struct return_instance *free_ret_instance(struct return_instance *ri) |
| 1704 | { |
| 1705 | struct return_instance *next = ri->next; |
| 1706 | put_uprobe(ri->uprobe); |
| 1707 | kfree(ri); |
| 1708 | return next; |
| 1709 | } |
| 1710 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1711 | /* |
| 1712 | * Called with no locks held. |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1713 | * Called in context of an exiting or an exec-ing thread. |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1714 | */ |
| 1715 | void uprobe_free_utask(struct task_struct *t) |
| 1716 | { |
| 1717 | struct uprobe_task *utask = t->utask; |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1718 | struct return_instance *ri; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1719 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1720 | if (!utask) |
| 1721 | return; |
| 1722 | |
| 1723 | if (utask->active_uprobe) |
| 1724 | put_uprobe(utask->active_uprobe); |
| 1725 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1726 | ri = utask->return_instances; |
Oleg Nesterov | 2bb5e84 | 2015-07-21 15:40:06 +0200 | [diff] [blame] | 1727 | while (ri) |
| 1728 | ri = free_ret_instance(ri); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1729 | |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1730 | xol_free_insn_slot(t); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1731 | kfree(utask); |
| 1732 | t->utask = NULL; |
| 1733 | } |
| 1734 | |
| 1735 | /* |
Randy Dunlap | c034f48 | 2021-02-25 17:21:10 -0800 | [diff] [blame] | 1736 | * Allocate a uprobe_task object for the task if necessary. |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1737 | * Called when the thread hits a breakpoint. |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1738 | * |
| 1739 | * Returns: |
| 1740 | * - pointer to new uprobe_task on success |
| 1741 | * - NULL otherwise |
| 1742 | */ |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1743 | static struct uprobe_task *get_utask(void) |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1744 | { |
Oleg Nesterov | 5a2df66 | 2012-12-31 17:03:32 +0100 | [diff] [blame] | 1745 | if (!current->utask) |
| 1746 | current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); |
| 1747 | return current->utask; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1748 | } |
| 1749 | |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1750 | static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) |
| 1751 | { |
| 1752 | struct uprobe_task *n_utask; |
| 1753 | struct return_instance **p, *o, *n; |
| 1754 | |
| 1755 | n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); |
| 1756 | if (!n_utask) |
| 1757 | return -ENOMEM; |
| 1758 | t->utask = n_utask; |
| 1759 | |
| 1760 | p = &n_utask->return_instances; |
| 1761 | for (o = o_utask->return_instances; o; o = o->next) { |
| 1762 | n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); |
| 1763 | if (!n) |
| 1764 | return -ENOMEM; |
| 1765 | |
| 1766 | *n = *o; |
Oleg Nesterov | f231722 | 2015-07-21 15:40:03 +0200 | [diff] [blame] | 1767 | get_uprobe(n->uprobe); |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1768 | n->next = NULL; |
| 1769 | |
| 1770 | *p = n; |
| 1771 | p = &n->next; |
| 1772 | n_utask->depth++; |
| 1773 | } |
| 1774 | |
| 1775 | return 0; |
| 1776 | } |
| 1777 | |
| 1778 | static void uprobe_warn(struct task_struct *t, const char *msg) |
| 1779 | { |
| 1780 | pr_warn("uprobe: %s:%d failed to %s\n", |
| 1781 | current->comm, current->pid, msg); |
| 1782 | } |
| 1783 | |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1784 | static void dup_xol_work(struct callback_head *work) |
| 1785 | { |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1786 | if (current->flags & PF_EXITING) |
| 1787 | return; |
| 1788 | |
Michal Hocko | 598fdc1 | 2016-05-23 16:26:08 -0700 | [diff] [blame] | 1789 | if (!__create_xol_area(current->utask->dup_xol_addr) && |
| 1790 | !fatal_signal_pending(current)) |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1791 | uprobe_warn(current, "dup xol area"); |
| 1792 | } |
| 1793 | |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1794 | /* |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1795 | * Called in context of a new clone/fork from copy_process. |
| 1796 | */ |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1797 | void uprobe_copy_process(struct task_struct *t, unsigned long flags) |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1798 | { |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1799 | struct uprobe_task *utask = current->utask; |
| 1800 | struct mm_struct *mm = current->mm; |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1801 | struct xol_area *area; |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1802 | |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1803 | t->utask = NULL; |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1804 | |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1805 | if (!utask || !utask->return_instances) |
| 1806 | return; |
| 1807 | |
| 1808 | if (mm == t->mm && !(flags & CLONE_VFORK)) |
Oleg Nesterov | 248d3a7 | 2013-10-13 21:18:41 +0200 | [diff] [blame] | 1809 | return; |
| 1810 | |
| 1811 | if (dup_utask(t, utask)) |
| 1812 | return uprobe_warn(t, "dup ret instances"); |
Oleg Nesterov | aa59c53 | 2013-10-13 21:18:44 +0200 | [diff] [blame] | 1813 | |
| 1814 | /* The task can fork() after dup_xol_work() fails */ |
| 1815 | area = mm->uprobes_state.xol_area; |
| 1816 | if (!area) |
| 1817 | return uprobe_warn(t, "dup xol area"); |
| 1818 | |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1819 | if (mm == t->mm) |
| 1820 | return; |
| 1821 | |
Oleg Nesterov | 3247343 | 2013-11-08 18:52:21 +0100 | [diff] [blame] | 1822 | t->utask->dup_xol_addr = area->vaddr; |
| 1823 | init_task_work(&t->utask->dup_xol_work, dup_xol_work); |
Jens Axboe | 91989c7 | 2020-10-16 09:02:26 -0600 | [diff] [blame] | 1824 | task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); |
Oleg Nesterov | b68e074 | 2013-10-13 21:18:31 +0200 | [diff] [blame] | 1825 | } |
| 1826 | |
| 1827 | /* |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1828 |  * The current notion of area->vaddr assumes that the trampoline address |
| 1829 |  * always equals area->vaddr. |
| 1830 |  * |
| 1831 |  * Returns -1 if the xol_area is not allocated. |
| 1832 | */ |
| 1833 | static unsigned long get_trampoline_vaddr(void) |
| 1834 | { |
| 1835 | struct xol_area *area; |
| 1836 | unsigned long trampoline_vaddr = -1; |
| 1837 | |
Paul E. McKenney | 5c6338b | 2017-10-09 11:08:53 -0700 | [diff] [blame] | 1838 | /* Pairs with xol_add_vma() smp_store_release() */ |
| 1839 | area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ |
Anton Arapov | e78aebf | 2013-04-03 18:00:32 +0200 | [diff] [blame] | 1840 | if (area) |
| 1841 | trampoline_vaddr = area->vaddr; |
| 1842 | |
| 1843 | return trampoline_vaddr; |
| 1844 | } |
| 1845 | |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1846 | static void cleanup_return_instances(struct uprobe_task *utask, bool chained, |
| 1847 | struct pt_regs *regs) |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1848 | { |
| 1849 | struct return_instance *ri = utask->return_instances; |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1850 | enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 1851 | |
| 1852 | while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1853 | ri = free_ret_instance(ri); |
| 1854 | utask->depth--; |
| 1855 | } |
| 1856 | utask->return_instances = ri; |
| 1857 | } |
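
/*
 * Example of the longjmp() case handled above (illustrative): suppose the
 * task entered f1() -> f2() -> f3(), each uretprobed, so the LIFO chain is
 * f3 -> f2 -> f1. A longjmp() out of f3 and f2 back into f1 moves the user
 * stack pointer above the frames of f2/f3; on x86, for instance,
 * arch_uretprobe_is_alive() sees that those entries' recorded ri->stack is
 * below the current stack pointer, so they are freed, leaving only f1.
 */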
| 1858 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1859 | static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) |
| 1860 | { |
| 1861 | struct return_instance *ri; |
| 1862 | struct uprobe_task *utask; |
| 1863 | unsigned long orig_ret_vaddr, trampoline_vaddr; |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1864 | bool chained; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1865 | |
| 1866 | if (!get_xol_area()) |
| 1867 | return; |
| 1868 | |
| 1869 | utask = get_utask(); |
| 1870 | if (!utask) |
| 1871 | return; |
| 1872 | |
Anton Arapov | ded49c5 | 2013-04-03 18:00:37 +0200 | [diff] [blame] | 1873 | if (utask->depth >= MAX_URETPROBE_DEPTH) { |
| 1874 | printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" |
| 1875 | " nestedness limit pid/tgid=%d/%d\n", |
| 1876 | current->pid, current->tgid); |
| 1877 | return; |
| 1878 | } |
| 1879 | |
Oleg Nesterov | 6c58d0e | 2015-07-21 15:40:10 +0200 | [diff] [blame] | 1880 | ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1881 | if (!ri) |
Oleg Nesterov | 6c58d0e | 2015-07-21 15:40:10 +0200 | [diff] [blame] | 1882 | return; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1883 | |
| 1884 | trampoline_vaddr = get_trampoline_vaddr(); |
| 1885 | orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); |
| 1886 | if (orig_ret_vaddr == -1) |
| 1887 | goto fail; |
| 1888 | |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1889 | /* drop the entries invalidated by longjmp() */ |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1890 | chained = (orig_ret_vaddr == trampoline_vaddr); |
| 1891 | cleanup_return_instances(utask, chained, regs); |
Oleg Nesterov | a5b7e1a | 2015-07-21 15:40:23 +0200 | [diff] [blame] | 1892 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1893 | /* |
| 1894 | * We don't want to keep the trampoline address on the stack; rather,
| 1895 | * keep the original return address of the first caller through all the
| 1896 | * subsequent instances. This also makes breakpoint unwinding easier.
| 1897 | */ |
Oleg Nesterov | db087ef | 2015-07-21 15:40:28 +0200 | [diff] [blame] | 1898 | if (chained) { |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1899 | if (!utask->return_instances) { |
| 1900 | /* |
| 1901 | * This situation is not possible. Likely we have an |
| 1902 | * attack from user-space. |
| 1903 | */ |
Oleg Nesterov | 6c58d0e | 2015-07-21 15:40:10 +0200 | [diff] [blame] | 1904 | uprobe_warn(current, "handle tail call"); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1905 | goto fail; |
| 1906 | } |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1907 | orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; |
| 1908 | } |
| 1909 | |
Oleg Nesterov | f231722 | 2015-07-21 15:40:03 +0200 | [diff] [blame] | 1910 | ri->uprobe = get_uprobe(uprobe); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1911 | ri->func = instruction_pointer(regs); |
Oleg Nesterov | 7b868e4 | 2015-07-21 15:40:18 +0200 | [diff] [blame] | 1912 | ri->stack = user_stack_pointer(regs); |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1913 | ri->orig_ret_vaddr = orig_ret_vaddr; |
| 1914 | ri->chained = chained; |
| 1915 | |
Anton Arapov | ded49c5 | 2013-04-03 18:00:37 +0200 | [diff] [blame] | 1916 | utask->depth++; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1917 | ri->next = utask->return_instances; |
| 1918 | utask->return_instances = ri; |
| 1919 | |
| 1920 | return; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 1921 | fail: |
| 1922 | kfree(ri); |
| 1923 | } |
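
/*
 * Illustration: if probed f() calls probed g(), the per-task list grows
 * from the head, newest first:
 *
 *	utask->return_instances -> [g] -> [f] -> NULL
 *
 * In the chained case above (orig_ret_vaddr == trampoline_vaddr, i.e.
 * f tail-called g), the new instance does not record the trampoline
 * address but inherits the previous instance's orig_ret_vaddr, keeping
 * the first caller's real return address at hand.
 */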
| 1924 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1925 | /* Prepare to single-step probed instruction out of line. */ |
| 1926 | static int |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1927 | pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1928 | { |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1929 | struct uprobe_task *utask; |
| 1930 | unsigned long xol_vaddr; |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 1931 | int err; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 1932 | |
Oleg Nesterov | 608e742 | 2012-12-31 18:20:42 +0100 | [diff] [blame] | 1933 | utask = get_utask(); |
| 1934 | if (!utask) |
| 1935 | return -ENOMEM; |
Oleg Nesterov | a6cb3f6 | 2012-12-31 18:00:06 +0100 | [diff] [blame] | 1936 | |
| 1937 | xol_vaddr = xol_get_insn_slot(uprobe); |
| 1938 | if (!xol_vaddr) |
| 1939 | return -ENOMEM; |
| 1940 | |
| 1941 | utask->xol_vaddr = xol_vaddr; |
| 1942 | utask->vaddr = bp_vaddr; |
| 1943 | |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 1944 | err = arch_uprobe_pre_xol(&uprobe->arch, regs); |
| 1945 | if (unlikely(err)) { |
| 1946 | xol_free_insn_slot(current); |
| 1947 | return err; |
| 1948 | } |
| 1949 | |
Oleg Nesterov | 608e742 | 2012-12-31 18:20:42 +0100 | [diff] [blame] | 1950 | utask->active_uprobe = uprobe; |
| 1951 | utask->state = UTASK_SSTEP; |
Oleg Nesterov | aba5102 | 2012-12-31 18:12:48 +0100 | [diff] [blame] | 1952 | return 0; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1953 | } |
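
/*
 * Rough flow: xol_get_insn_slot() has copied the (arch-adjusted) probed
 * instruction into a free slot of this mm's xol area;
 * arch_uprobe_pre_xol() redirects the task there (on x86 it points
 * regs->ip at the slot and sets TF), and handle_singlestep() applies
 * the arch fixups once the single-step trap comes back.
 */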
| 1954 | |
| 1955 | /* |
| 1956 | * If we are single-stepping, ensure this thread is not interrupted by
| 1957 | * non-fatal signals until the single-step completes. When the xol insn
| 1958 | * itself triggers a signal, restart the original insn even if the task
| 1959 | * is already SIGKILL'ed (since the coredump should report the correct
| 1960 | * ip). This is even more important if the task has a handler for
| 1961 | * SIGSEGV/etc: the _same_ instruction should be repeated after return
| 1962 | * from the signal handler, and SSTEP can never finish in this case.
| 1963 | */ |
| 1964 | bool uprobe_deny_signal(void) |
| 1965 | { |
| 1966 | struct task_struct *t = current; |
| 1967 | struct uprobe_task *utask = t->utask; |
| 1968 | |
| 1969 | if (likely(!utask || !utask->active_uprobe)) |
| 1970 | return false; |
| 1971 | |
| 1972 | WARN_ON_ONCE(utask->state != UTASK_SSTEP); |
| 1973 | |
Jens Axboe | 5c251e9 | 2020-10-26 14:32:27 -0600 | [diff] [blame] | 1974 | if (task_sigpending(t)) { |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1975 | spin_lock_irq(&t->sighand->siglock); |
| 1976 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
| 1977 | spin_unlock_irq(&t->sighand->siglock); |
| 1978 | |
| 1979 | if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { |
| 1980 | utask->state = UTASK_SSTEP_TRAPPED; |
| 1981 | set_tsk_thread_flag(t, TIF_UPROBE); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1982 | } |
| 1983 | } |
| 1984 | |
| 1985 | return true; |
| 1986 | } |
| 1987 | |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 1988 | static void mmf_recalc_uprobes(struct mm_struct *mm) |
| 1989 | { |
| 1990 | struct vm_area_struct *vma; |
| 1991 | |
| 1992 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 1993 | if (!valid_vma(vma, false)) |
| 1994 | continue; |
| 1995 | /* |
| 1996 | * This is not strictly accurate: we can race with
| 1997 | * uprobe_unregister() and see an already removed
| 1998 | * uprobe if delete_uprobe() was not yet called.
Oleg Nesterov | 63633cb | 2012-11-22 18:30:15 +0100 | [diff] [blame] | 1999 | * Or this uprobe can be filtered out. |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2000 | */ |
| 2001 | if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) |
| 2002 | return; |
| 2003 | } |
| 2004 | |
| 2005 | clear_bit(MMF_HAS_UPROBES, &mm->flags); |
| 2006 | } |
| 2007 | |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2008 | static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2009 | { |
| 2010 | struct page *page; |
| 2011 | uprobe_opcode_t opcode; |
| 2012 | int result; |
| 2013 | |
Oleg Nesterov | 013b2de | 2020-05-04 18:47:25 +0200 | [diff] [blame] | 2014 | if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) |
| 2015 | return -EINVAL; |
| 2016 | |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2017 | pagefault_disable(); |
Linus Torvalds | bd28b14 | 2016-05-22 17:21:27 -0700 | [diff] [blame] | 2018 | result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2019 | pagefault_enable(); |
| 2020 | |
| 2021 | if (likely(result == 0)) |
| 2022 | goto out; |
| 2023 | |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 2024 | /* |
| 2025 | * The NULL 'tsk' here ensures that any faults that occur here |
| 2026 | * will not be accounted to the task. 'mm' *is* current->mm, |
| 2027 | * but we treat this as a 'remote' access since it is |
| 2028 | * essentially a kernel access to the memory. |
| 2029 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2030 | result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, |
Lorenzo Stoakes | 5b56d49 | 2016-12-14 15:06:52 -0800 | [diff] [blame] | 2031 | NULL, NULL); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2032 | if (result < 0) |
| 2033 | return result; |
| 2034 | |
Oleg Nesterov | ab0d805 | 2013-03-24 18:24:37 +0100 | [diff] [blame] | 2035 | copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2036 | put_page(page); |
| 2037 | out: |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2038 | /* This needs to return true for any variant of the trap insn */ |
| 2039 | return is_trap_insn(&opcode); |
Oleg Nesterov | ec75fba | 2012-09-23 21:55:19 +0200 | [diff] [blame] | 2040 | } |
| 2041 | |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 2042 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2043 | { |
| 2044 | struct mm_struct *mm = current->mm; |
| 2045 | struct uprobe *uprobe = NULL; |
| 2046 | struct vm_area_struct *vma; |
| 2047 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2048 | mmap_read_lock(mm); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2049 | vma = find_vma(mm, bp_vaddr); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2050 | if (vma && vma->vm_start <= bp_vaddr) { |
| 2051 | if (valid_vma(vma, false)) { |
Oleg Nesterov | f281769 | 2013-03-17 18:54:44 +0100 | [diff] [blame] | 2052 | struct inode *inode = file_inode(vma->vm_file); |
Oleg Nesterov | cb113b4 | 2012-07-29 20:22:42 +0200 | [diff] [blame] | 2053 | loff_t offset = vaddr_to_offset(vma, bp_vaddr); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2054 | |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2055 | uprobe = find_uprobe(inode, offset); |
| 2056 | } |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 2057 | |
| 2058 | if (!uprobe) |
Ananth N Mavinakayanahalli | 0908ad6 | 2013-03-22 20:46:27 +0530 | [diff] [blame] | 2059 | *is_swbp = is_trap_at_addr(mm, bp_vaddr); |
Oleg Nesterov | d790d34 | 2012-05-29 21:29:14 +0200 | [diff] [blame] | 2060 | } else { |
| 2061 | *is_swbp = -EFAULT; |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2062 | } |
Oleg Nesterov | 499a4f3 | 2012-08-19 17:41:34 +0200 | [diff] [blame] | 2063 | |
| 2064 | if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) |
| 2065 | mmf_recalc_uprobes(mm); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2066 | mmap_read_unlock(mm); |
Oleg Nesterov | 3a9ea05 | 2012-05-29 21:28:57 +0200 | [diff] [blame] | 2067 | |
| 2068 | return uprobe; |
| 2069 | } |
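
/*
 * On return *is_swbp is a tri-state consumed by handle_swbp():
 * positive means a real trap insn is there but no uprobe claims it
 * (deliver SIGTRAP); zero means we likely raced with
 * uprobe_unregister() (restart the insn); -EFAULT means there is no
 * mapping at bp_vaddr (restart and take the real fault).
 */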
| 2070 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2071 | static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) |
| 2072 | { |
| 2073 | struct uprobe_consumer *uc; |
| 2074 | int remove = UPROBE_HANDLER_REMOVE; |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2075 | bool need_prep = false; /* prepare return uprobe, when needed */ |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2076 | |
| 2077 | down_read(&uprobe->register_rwsem); |
| 2078 | for (uc = uprobe->consumers; uc; uc = uc->next) { |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2079 | int rc = 0; |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2080 | |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2081 | if (uc->handler) { |
| 2082 | rc = uc->handler(uc, regs); |
| 2083 | WARN(rc & ~UPROBE_HANDLER_MASK, |
Sakari Ailus | d75f773 | 2019-03-25 21:32:28 +0200 | [diff] [blame] | 2084 | "bad rc=0x%x from %ps()\n", rc, uc->handler); |
Anton Arapov | ea02487 | 2013-04-03 18:00:31 +0200 | [diff] [blame] | 2085 | } |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2086 | |
| 2087 | if (uc->ret_handler) |
| 2088 | need_prep = true; |
| 2089 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2090 | remove &= rc; |
| 2091 | } |
| 2092 | |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2093 | if (need_prep && !remove) |
| 2094 | prepare_uretprobe(uprobe, regs); /* put bp at return */ |
| 2095 | |
Oleg Nesterov | da1816b | 2012-12-29 17:49:11 +0100 | [diff] [blame] | 2096 | if (remove && uprobe->consumers) { |
| 2097 | WARN_ON(!uprobe_is_active(uprobe)); |
| 2098 | unapply_uprobe(uprobe, current->mm); |
| 2099 | } |
| 2100 | up_read(&uprobe->register_rwsem); |
| 2101 | } |
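
/*
 * Note: remove starts as UPROBE_HANDLER_REMOVE and is AND'ed with each
 * handler's return value, so unapply_uprobe() runs only if every
 * consumer asked for removal; in that case no uretprobe is prepared
 * either, since need_prep is only honoured when remove is clear.
 */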
| 2102 | |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2103 | static void |
| 2104 | handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) |
| 2105 | { |
| 2106 | struct uprobe *uprobe = ri->uprobe; |
| 2107 | struct uprobe_consumer *uc; |
| 2108 | |
| 2109 | down_read(&uprobe->register_rwsem); |
| 2110 | for (uc = uprobe->consumers; uc; uc = uc->next) { |
| 2111 | if (uc->ret_handler) |
| 2112 | uc->ret_handler(uc, ri->func, regs); |
| 2113 | } |
| 2114 | up_read(&uprobe->register_rwsem); |
| 2115 | } |
| 2116 | |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2117 | static struct return_instance *find_next_ret_chain(struct return_instance *ri) |
| 2118 | { |
| 2119 | bool chained; |
| 2120 | |
| 2121 | do { |
| 2122 | chained = ri->chained; |
| 2123 | ri = ri->next; /* can't be NULL if chained */ |
| 2124 | } while (chained); |
| 2125 | |
| 2126 | return ri; |
| 2127 | } |
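
/*
 * A "chain" is a run of instances sharing one original return address,
 * produced by the tail-call handling in prepare_uretprobe(). This
 * walks past the current run and returns the head of the next chain,
 * or NULL if the current chain is the last one.
 */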
| 2128 | |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2129 | static void handle_trampoline(struct pt_regs *regs) |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2130 | { |
| 2131 | struct uprobe_task *utask; |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2132 | struct return_instance *ri, *next; |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2133 | bool valid; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2134 | |
| 2135 | utask = current->utask; |
| 2136 | if (!utask) |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2137 | goto sigill; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2138 | |
| 2139 | ri = utask->return_instances; |
| 2140 | if (!ri) |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2141 | goto sigill; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2142 | |
Oleg Nesterov | a83cfeb | 2015-07-21 15:40:13 +0200 | [diff] [blame] | 2143 | do { |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2144 | /* |
| 2145 | * We should throw out the frames invalidated by longjmp(). |
| 2146 | * If this chain is valid, then the next one should be alive |
| 2147 | * or NULL; the latter case means that nobody but ri->func |
| 2148 | * could hit this trampoline on return. TODO: sigaltstack(). |
| 2149 | */ |
| 2150 | next = find_next_ret_chain(ri); |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 2151 | valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs); |
Oleg Nesterov | 5eeb50d | 2015-07-21 15:40:21 +0200 | [diff] [blame] | 2152 | |
| 2153 | instruction_pointer_set(regs, ri->orig_ret_vaddr); |
| 2154 | do { |
| 2155 | if (valid) |
| 2156 | handle_uretprobe_chain(ri, regs); |
| 2157 | ri = free_ret_instance(ri); |
| 2158 | utask->depth--; |
| 2159 | } while (ri != next); |
| 2160 | } while (!valid); |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2161 | |
| 2162 | utask->return_instances = ri; |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2163 | return; |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2164 | |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2165 | sigill: |
| 2166 | uprobe_warn(current, "handle uretprobe, sending SIGILL."); |
Eric W. Biederman | 3cf5d07 | 2019-05-23 10:17:27 -0500 | [diff] [blame] | 2167 | force_sig(SIGILL); |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2168 | |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2169 | } |
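
/*
 * Hitting the sigill path above means the task reached the trampoline
 * address with no pending return instances, i.e. user-space presumably
 * jumped there on its own; SIGILL is the defensive response.
 */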
| 2170 | |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2171 | bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) |
| 2172 | { |
| 2173 | return false; |
| 2174 | } |
| 2175 | |
Oleg Nesterov | 86dcb70 | 2015-07-21 15:40:26 +0200 | [diff] [blame] | 2176 | bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, |
| 2177 | struct pt_regs *regs) |
Oleg Nesterov | 97da897 | 2015-07-21 15:40:16 +0200 | [diff] [blame] | 2178 | { |
| 2179 | return true; |
| 2180 | } |
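
/*
 * The weak default treats every pending instance as alive, which turns
 * the longjmp() cleanup in cleanup_return_instances() and
 * handle_trampoline() into a no-op; an arch may override it (as x86
 * does, by comparing ret->stack with regs->sp) to detect abandoned
 * frames.
 */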
| 2181 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2182 | /* |
| 2183 | * Run the handlers and ask the thread to single-step.
| 2184 | * Ensure all non-fatal signals cannot interrupt the thread while it single-steps.
| 2185 | */ |
| 2186 | static void handle_swbp(struct pt_regs *regs) |
| 2187 | { |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2188 | struct uprobe *uprobe; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2189 | unsigned long bp_vaddr; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 2190 | int is_swbp; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2191 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2192 | bp_vaddr = uprobe_get_swbp_addr(regs); |
Oleg Nesterov | 0b5256c | 2015-07-21 15:40:08 +0200 | [diff] [blame] | 2193 | if (bp_vaddr == get_trampoline_vaddr()) |
| 2194 | return handle_trampoline(regs); |
Anton Arapov | fec8898 | 2013-04-03 18:00:36 +0200 | [diff] [blame] | 2195 | |
| 2196 | uprobe = find_active_uprobe(bp_vaddr, &is_swbp); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2197 | if (!uprobe) { |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 2198 | if (is_swbp > 0) { |
| 2199 | /* No matching uprobe; signal SIGTRAP. */ |
Oleg Nesterov | fe5ed7a | 2020-07-23 17:44:20 +0200 | [diff] [blame] | 2200 | force_sig(SIGTRAP); |
Oleg Nesterov | 56bb4cf | 2012-05-29 21:29:47 +0200 | [diff] [blame] | 2201 | } else { |
| 2202 | /* |
| 2203 | * Either we raced with uprobe_unregister() or we can't |
| 2204 | * access this memory. The latter is only possible if |
| 2205 | * another thread plays with our ->mm. In both cases |
| 2206 | * we can simply restart. If this vma was unmapped we |
| 2207 | * can pretend this insn was not executed yet and get |
| 2208 | * the (correct) SIGSEGV after restart. |
| 2209 | */ |
| 2210 | instruction_pointer_set(regs, bp_vaddr); |
| 2211 | } |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2212 | return; |
| 2213 | } |
Oleg Nesterov | 74e59df | 2012-12-30 15:54:08 +0100 | [diff] [blame] | 2214 | |
| 2215 | /* change it in advance for ->handler() and restart */ |
| 2216 | instruction_pointer_set(regs, bp_vaddr); |
| 2217 | |
Oleg Nesterov | 142b18d | 2012-09-29 21:56:57 +0200 | [diff] [blame] | 2218 | /* |
| 2219 | * TODO: move copy_insn/etc into _register and remove this hack. |
| 2220 | * After we hit the bp, _unregister + _register can install the |
| 2221 | * new and not-yet-analyzed uprobe at the same address, restart. |
| 2222 | */ |
Oleg Nesterov | 71434f2 | 2012-09-30 21:12:44 +0200 | [diff] [blame] | 2223 | if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) |
Oleg Nesterov | 74e59df | 2012-12-30 15:54:08 +0100 | [diff] [blame] | 2224 | goto out; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2225 | |
Andrea Parri | 09d3f01 | 2018-11-22 17:10:31 +0100 | [diff] [blame] | 2226 | /* |
| 2227 | * Pairs with the smp_wmb() in prepare_uprobe(). |
| 2228 | * |
| 2229 | * Guarantees that if we see the UPROBE_COPY_INSN bit set, then |
| 2230 | * we must also see the stores to &uprobe->arch performed by the |
| 2231 | * prepare_uprobe() call. |
| 2232 | */ |
| 2233 | smp_rmb(); |
| 2234 | |
Oleg Nesterov | 72fd293 | 2013-11-26 09:35:25 +0900 | [diff] [blame] | 2235 | /* Tracing handlers use ->utask to communicate with fetch methods */ |
| 2236 | if (!get_utask()) |
| 2237 | goto out; |
| 2238 | |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2239 | if (arch_uprobe_ignore(&uprobe->arch, regs)) |
| 2240 | goto out; |
| 2241 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2242 | handler_chain(uprobe, regs); |
David A. Long | 6fe50a2 | 2014-02-03 14:25:49 -0500 | [diff] [blame] | 2243 | |
Oleg Nesterov | 8a6b173 | 2014-03-30 18:56:22 +0200 | [diff] [blame] | 2244 | if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) |
Oleg Nesterov | 0578a97 | 2012-09-14 18:31:23 +0200 | [diff] [blame] | 2245 | goto out; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2246 | |
Oleg Nesterov | 608e742 | 2012-12-31 18:20:42 +0100 | [diff] [blame] | 2247 | if (!pre_ssout(uprobe, regs, bp_vaddr)) |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2248 | return; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2249 | |
Oleg Nesterov | 8a6b173 | 2014-03-30 18:56:22 +0200 | [diff] [blame] | 2250 | /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ |
Oleg Nesterov | 0578a97 | 2012-09-14 18:31:23 +0200 | [diff] [blame] | 2251 | out: |
Sebastian Andrzej Siewior | 8bd8744 | 2012-08-07 18:12:30 +0200 | [diff] [blame] | 2252 | put_uprobe(uprobe); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2253 | } |
| 2254 | |
| 2255 | /* |
| 2256 | * Perform required fix-ups and disable singlestep. |
| 2257 | * Allow pending signals to take effect. |
| 2258 | */ |
| 2259 | static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) |
| 2260 | { |
| 2261 | struct uprobe *uprobe; |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2262 | int err = 0; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2263 | |
| 2264 | uprobe = utask->active_uprobe; |
| 2265 | if (utask->state == UTASK_SSTEP_ACK) |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2266 | err = arch_uprobe_post_xol(&uprobe->arch, regs); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2267 | else if (utask->state == UTASK_SSTEP_TRAPPED) |
| 2268 | arch_uprobe_abort_xol(&uprobe->arch, regs); |
| 2269 | else |
| 2270 | WARN_ON_ONCE(1); |
| 2271 | |
| 2272 | put_uprobe(uprobe); |
| 2273 | utask->active_uprobe = NULL; |
| 2274 | utask->state = UTASK_RUNNING; |
Srikar Dronamraju | d4b3b63 | 2012-03-30 23:56:31 +0530 | [diff] [blame] | 2275 | xol_free_insn_slot(current); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2276 | |
| 2277 | spin_lock_irq(¤t->sighand->siglock); |
| 2278 | recalc_sigpending(); /* see uprobe_deny_signal() */ |
| 2279 | spin_unlock_irq(¤t->sighand->siglock); |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2280 | |
| 2281 | if (unlikely(err)) { |
| 2282 | uprobe_warn(current, "execute the probed insn, sending SIGILL."); |
Eric W. Biederman | 3cf5d07 | 2019-05-23 10:17:27 -0500 | [diff] [blame] | 2283 | force_sig(SIGILL); |
Oleg Nesterov | 014940b | 2014-04-03 20:20:10 +0200 | [diff] [blame] | 2284 | } |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2285 | } |
| 2286 | |
| 2287 | /* |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2288 | * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and |
| 2289 | * allows the thread to return from interrupt. After that handle_swbp() |
| 2290 | * sets utask->active_uprobe. |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2291 | * |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2292 | * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag |
| 2293 | * and allows the thread to return from interrupt. |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2294 | * |
| 2295 | * While returning to userspace, thread notices the TIF_UPROBE flag and calls |
| 2296 | * uprobe_notify_resume(). |
| 2297 | */ |
| 2298 | void uprobe_notify_resume(struct pt_regs *regs) |
| 2299 | { |
| 2300 | struct uprobe_task *utask; |
| 2301 | |
Oleg Nesterov | db023ea | 2012-09-14 19:05:46 +0200 | [diff] [blame] | 2302 | clear_thread_flag(TIF_UPROBE); |
| 2303 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2304 | utask = current->utask; |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2305 | if (utask && utask->active_uprobe) |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2306 | handle_singlestep(utask, regs); |
Oleg Nesterov | 1b08e907 | 2012-09-14 18:52:10 +0200 | [diff] [blame] | 2307 | else |
| 2308 | handle_swbp(regs); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2309 | } |
| 2310 | |
| 2311 | /* |
| 2312 | * uprobe_pre_sstep_notifier gets called from interrupt context as part of
| 2313 | * the notifier mechanism. Sets the TIF_UPROBE flag and indicates a breakpoint hit.
| 2314 | */ |
| 2315 | int uprobe_pre_sstep_notifier(struct pt_regs *regs) |
| 2316 | { |
Anton Arapov | 0dfd0eb | 2013-04-03 18:00:35 +0200 | [diff] [blame] | 2317 | if (!current->mm) |
| 2318 | return 0; |
| 2319 | |
| 2320 | if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && |
| 2321 | (!current->utask || !current->utask->return_instances)) |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2322 | return 0; |
| 2323 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2324 | set_thread_flag(TIF_UPROBE); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2325 | return 1; |
| 2326 | } |
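
/*
 * The return_instances check above matters: a task with pending
 * uretprobes must still get TIF_UPROBE so handle_trampoline() can run,
 * even if MMF_HAS_UPROBES has been cleared for this mm (e.g. after all
 * probes were removed but before the hijacked returns fired).
 */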
| 2327 | |
| 2328 | /* |
| 2329 | * uprobe_post_sstep_notifier gets called in interrupt context as part of the
| 2330 | * notifier mechanism. Sets the TIF_UPROBE flag and indicates completion of single-step.
| 2331 | */ |
| 2332 | int uprobe_post_sstep_notifier(struct pt_regs *regs) |
| 2333 | { |
| 2334 | struct uprobe_task *utask = current->utask; |
| 2335 | |
| 2336 | if (!current->mm || !utask || !utask->active_uprobe) |
| 2337 | /* task is currently not uprobed */ |
| 2338 | return 0; |
| 2339 | |
| 2340 | utask->state = UTASK_SSTEP_ACK; |
| 2341 | set_thread_flag(TIF_UPROBE); |
| 2342 | return 1; |
| 2343 | } |
| 2344 | |
| 2345 | static struct notifier_block uprobe_exception_nb = { |
| 2346 | .notifier_call = arch_uprobe_exception_notify, |
| 2347 | .priority = INT_MAX-1, /* notified after kprobes, kgdb */ |
| 2348 | }; |
| 2349 | |
Nadav Amit | aad42dd | 2019-04-26 16:22:44 -0700 | [diff] [blame] | 2350 | void __init uprobes_init(void) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2351 | { |
| 2352 | int i; |
| 2353 | |
Oleg Nesterov | 66d06df | 2012-11-25 22:48:37 +0100 | [diff] [blame] | 2354 | for (i = 0; i < UPROBES_HASH_SZ; i++) |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2355 | mutex_init(&uprobes_mmap_mutex[i]); |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 2356 | |
Nadav Amit | aad42dd | 2019-04-26 16:22:44 -0700 | [diff] [blame] | 2357 | BUG_ON(register_die_notifier(&uprobe_exception_nb)); |
Srikar Dronamraju | 2b14449 | 2012-02-09 14:56:42 +0530 | [diff] [blame] | 2358 | } |