// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force the buffer to be aligned to unsigned long to avoid misaligned
 * access surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

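/*
 * For illustration: because perf_trace_t is declared as an unsigned long
 * array rather than a char array, alloc_percpu(perf_trace_t) hands back
 * PERF_MAX_TRACE_SIZE bytes of scratch space with at least
 * __alignof__(unsigned long), e.g.:
 *
 *	perf_trace_t __percpu *buf = alloc_percpu(perf_trace_t);
 */
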
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed creation of the parent,
	 * so allow its children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check the current process (owner) permissions here,
	 * because the code below is only reached via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace event is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler itself, and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dumps as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak;
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

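/*
 * A hedged sketch of how an event supplies the ->perf_perm hook checked
 * above: the TRACE_EVENT_PERF_PERM() macro attaches an expression as the
 * permission callback. The x86 irq_vectors events use roughly this shape
 * to refuse sampling events that could recurse:
 *
 *	TRACE_EVENT_PERF_PERM(irq_work_exit,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */
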
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

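/*
 * Roughly, the scratch buffers allocated above map onto perf's software
 * event recursion contexts, which perf_trace_buf_alloc() later indexes
 * via perf_swevent_get_recursion_context():
 *
 *	perf_trace_buf[0]	task context
 *	perf_trace_buf[1]	softirq context
 *	perf_trace_buf[2]	hardirq context
 *	perf_trace_buf[3]	NMI context
 */
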
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	trace_event_put_ref(tp_event);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    trace_event_try_get_ref(tp_event)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				trace_event_put_ref(tp_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

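/*
 * Usage sketch (user space, illustrative): perf_trace_init() is reached
 * when perf_event_open() is passed a tracepoint event whose attr.config
 * holds the event id read from tracefs, e.g. from
 * /sys/kernel/tracing/events/sched/sched_switch/id:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_TRACEPOINT,
 *		.size		= sizeof(attr),
 *		.config		= id,
 *		.sample_type	= PERF_SAMPLE_RAW,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */
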
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

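/*
 * Usage sketch (illustrative): perf_kprobe_init() is reached through the
 * dynamic "kprobe" PMU. Assuming the format advertised under
 * /sys/bus/event_source/devices/kprobe/ (the retprobe flag is config
 * bit 0, and kprobe_pmu_type is read from the PMU's "type" file), a
 * caller would set up roughly:
 *
 *	struct perf_event_attr attr = {
 *		.type		= kprobe_pmu_type,
 *		.size		= sizeof(attr),
 *		.kprobe_func	= (__u64)(unsigned long)"do_sys_openat2",
 *		.probe_offset	= 0,
 *		.config		= 0,
 *	};
 */
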
#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

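/*
 * Usage sketch (illustrative), mirroring the kprobe case: the "uprobe"
 * PMU takes a path/offset pair, with the retprobe flag in config bit 0
 * and the optional ref_ctr_offset in the upper config bits, per the
 * format files under /sys/bus/event_source/devices/uprobe/:
 *
 *	attr.uprobe_path  = (__u64)(unsigned long)"/usr/lib/libc.so.6";
 *	attr.probe_offset = offset_of_probed_instruction;
 */
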
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was
	 * performed and we need to take the default action of enqueueing
	 * our event on the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was
	 * performed and we need to take the default action of dequeueing
	 * our event from the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

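/*
 * perf_trace_buf_alloc() returns a per-cpu scratch buffer for the current
 * recursion context and claims that context; every successful call must
 * be paired with perf_trace_buf_submit(), or with a bare
 * perf_swevent_put_recursion_context(*rctxp) on the error path, so that
 * the context is released.
 */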
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough, wanted %d, have %d",
		      size, PERF_MAX_TRACE_SIZE))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the padding bytes added for alignment, to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;

	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

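/*
 * A hedged sketch of how the two helpers above are meant to be paired,
 * modeled on the handlers generated from include/trace/perf.h (the local
 * variable names are illustrative):
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields of *entry ...
 *	perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *			      head, NULL);
 *
 * perf_trace_buf_update() itself is called from perf_tp_event() on the
 * submit path to stamp the common trace_entry header into the record.
 */
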
#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;
	int bit;

	if (!rcu_is_watching())
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	if ((unsigned long)ops->private != smp_processor_id())
		goto out;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		goto out;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

out:
	ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}

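/*
 * Worked example for ENTRY_SIZE above (illustrative, assuming a typical
 * 64-bit build): sizeof(struct ftrace_entry) is 24 (an 8-byte trace_entry
 * header plus two 8-byte instruction pointers), so
 * ALIGN(24 + 4, 8) - 4 = 28. The extra u32 models the size field that
 * perf prepends to PERF_SAMPLE_RAW data, keeping the overall sample
 * u64-aligned without wasting a full u64 of padding.
 */
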
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */