// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

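/*
 * Scratch buffers used to build raw sample payloads, one per recursion
 * context (task, softirq, hardirq, NMI), shared by all perf trace
 * events and allocated on first use (see perf_trace_event_reg()).
 */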
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

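/*
 * Check whether the calling task may attach @p_event to @tp_event.
 * Only reached from the perf_event_open() syscall path.
 */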
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check the current process (owner) permissions here,
	 * because the code below is only called via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler itself, and the overall trickiness of
		 * doing so.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak;
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

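/*
 * Take one reference on @tp_event for @p_event.  The first perf event
 * on a trace event allocates its per-cpu hlists and registers it with
 * the tracing core; the first perf trace event system-wide also
 * allocates the per-context scratch buffers.
 */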
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

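/*
 * Drop one reference; the last one unregisters the event from the
 * tracing core and frees the per-cpu hlists, and, if this was the last
 * perf trace event system-wide, the scratch buffers too.
 */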
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	trace_event_put_ref(tp_event);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

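/*
 * Entry point for "tracepoint" perf events: look up the trace event
 * whose id matches attr.config and bind the perf event to it, holding
 * a reference on the trace event for the lifetime of the perf event.
 */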
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    trace_event_try_get_ref(tp_event)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				trace_event_put_ref(tp_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
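/*
 * Called from the "kprobe" PMU: build a local trace_kprobe for this
 * perf event.  Local events are not listed in tracefs and live only
 * as long as the perf event does.
 */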
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
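/*
 * Called from the "uprobe" PMU: build a local trace_uprobe for this
 * perf event from the user-supplied path and offset.  As with local
 * trace_kprobes, the event is not visible in tracefs.
 */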
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

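/*
 * PMU ->add() callback: unless the event class implements its own
 * TRACE_REG_PERF_ADD handling, queue the event on this CPU's hlist so
 * the tracepoint callback can find it.
 */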
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

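/*
 * Reserve room in the per-context scratch buffer for one raw sample.
 * On success, *rctxp holds the recursion context, which must be
 * released again via perf_trace_buf_submit() (see perf_tp_event()) or
 * perf_swevent_put_recursion_context().
 */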
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough, wanted %d, have %d",
		      size, PERF_MAX_TRACE_SIZE))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;

	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
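/*
 * ftrace callback for "function" perf events: build a TRACE_FN entry
 * in the perf trace buffer and submit it to the perf event hanging
 * off this ftrace_ops.
 */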
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;
	int bit;

	if (!rcu_is_watching())
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

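	/*
	 * ops->private carries the CPU this event is currently scheduled
	 * in on (set on TRACE_REG_PERF_ADD, reset to nr_cpu_ids on DEL);
	 * bail out on every other CPU.
	 */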
	if ((unsigned long)ops->private != smp_processor_id())
		goto out;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

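/*
 * Size of a TRACE_FN payload, padded so that it plus the u32 size
 * header perf prepends to raw samples (see struct perf_raw_record)
 * stays u64 aligned.
 */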
#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		goto out;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

out:
	ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

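/*
 * ->reg() callback of the function trace event.  OPEN/CLOSE map to
 * ftrace_ops (un)registration; ADD/DEL record the current CPU in
 * ops->private and return 1 to skip the default per-cpu hlist
 * handling in perf_trace_add()/perf_trace_del().
 */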
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */