// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>
/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};
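
/*
 * Example of the tsk_pinned bookkeeping: if, on a given cpu, two tasks
 * each hold one pinned breakpoint and a third task holds three, then
 * tsk_pinned[0] == 2 and tsk_pinned[2] == 1, and max_task_bp_pinned()
 * below reports 3.
 */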

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the total number of pinned and un-pinned breakpoints in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
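
/*
 * Note: every breakpoint counts as one slot by default. An architecture
 * where a single breakpoint can consume several debug registers (e.g. a
 * wide range watchpoint) could override this weak function to return the
 * real cost.
 */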

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter->attr.bp_type) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

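	/*
	 * Move this task's count from its old bucket to its new one.
	 * For example, a task going from two to three pinned breakpoints
	 * decrements tsk_pinned[1] and increments tsk_pinned[2]; an index
	 * of -1 (no breakpoints on that side) is simply skipped.
	 */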
	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
	return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register (or it will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
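/*
 * A worked example (assuming an arch with HBP_NUM == 4 data slots): if a
 * cpu already has one cpu-pinned breakpoint (cpu_pinned == 1) and its
 * busiest task holds two pinned breakpoints (max tsk_pinned == 2), then
 * reserving another pinned breakpoint of weight 1 succeeds because
 * 1 + 2 + 1 <= 4, while a fifth weight unit would make
 * __reserve_bp_slot() below return -ENOSPC.
 */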
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;
	int ret;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	ret = arch_reserve_bp_slot(bp);
	if (ret)
		return ret;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	arch_release_bp_slot(bp);

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);
}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int ret;

	mutex_lock(&nr_bp_mutex);
	ret = __modify_bp_slot(bp, old_type, new_type);
	mutex_unlock(&nr_bp_mutex);
	return ret;
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp, bp->attr.bp_type);

	return 0;
}

static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used by the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
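
/*
 * Minimal usage sketch (illustrative, not part of this file): watch a
 * word in the current task. The watched variable and handler names are
 * hypothetical; the handler must be a perf_overflow_handler_t.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_var;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, watch_triggered, NULL, current);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */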

static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len = from->bp_len;
	to->disabled = from->disabled;
}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
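
/*
 * Illustrative sketch: moving an existing watchpoint to a new address by
 * rewriting a copy of its attributes and handing it back. The target
 * variable name is hypothetical.
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = (unsigned long)&new_location;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */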

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that can be used by the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	cpus_read_unlock();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
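
/*
 * Illustrative sketch (modeled on samples/hw_breakpoint): place a write
 * watchpoint on a kernel symbol on every online cpu. The symbol and
 * handler names are examples only.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("jiffies");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, wide_triggered, NULL);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */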

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

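/*
 * The perf core routes PERF_TYPE_BREAKPOINT events to this pmu;
 * hw_breakpoint_add()/hw_breakpoint_del() install and remove the
 * breakpoint in the cpu's debug registers as the event is scheduled
 * in and out.
 */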
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}