// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed creating the parent,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler, and the overall trickiness of it.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

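/*
 * Attach the perf side of an event to its trace_event_call. The first
 * event on a given call allocates the per-cpu hlist that perf_trace_add()
 * enqueues onto and registers the TRACE_REG_PERF_REGISTER callback; the
 * first event system-wide also allocates the shared per-context output
 * buffers. Both are refcounted, so subsequent events just reuse them.
 */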
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

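/*
 * Drop one reference on the trace_event_call. On the last one, unregister
 * the TRACE_REG_PERF_UNREGISTER callback, wait out in-flight probes with
 * tracepoint_synchronize_unregister(), then free the per-cpu hlist and,
 * if no perf trace event is left system-wide, the shared output buffers.
 */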
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

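/*
 * Full setup of one perf event: permission check, registration against
 * the trace_event_call, then a TRACE_REG_PERF_OPEN call, unwinding the
 * registration if the open fails.
 */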
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

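/*
 * Entry point for the tracepoint PMU: find the trace_event_call whose id
 * matches attr.config, pin its module, and initialize the event. The
 * module reference is dropped here on failure, and otherwise by
 * perf_trace_event_unreg() at teardown.
 */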
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
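/*
 * Create a kprobe-based perf event with no corresponding entry in
 * tracefs (a "local" trace_kprobe): take the symbol name or address
 * from the perf_event_attr, build the probe, and initialize the event.
 */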
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
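/*
 * Counterpart of perf_kprobe_init() for uprobes: copy the target path
 * from user space and create a "local" trace_uprobe at the given offset,
 * private to this event.
 */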
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;
	path = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	ret = strncpy_from_user(
		path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
	if (ret == PATH_MAX) {
		/* The path did not fit; don't leak the buffer on the way out. */
		ret = -E2BIG;
		goto out;
	}
	if (ret < 0)
		goto out;
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

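/*
 * Hand out a recursion-protected per-cpu buffer for one trace record.
 * Returns the raw buffer (and optionally a scratch pt_regs) and stores
 * the recursion context in *rctxp; the caller must release that context,
 * typically via perf_trace_buf_submit().
 */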
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

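/*
 * Fill in the common trace_entry header (pid, irq flags, preempt count)
 * and the event type for a record from perf_trace_buf_alloc().
 */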
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
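/*
 * ftrace_ops callback for the perf function-trace event: called on every
 * traced function, it bails unless this is the CPU the event is active
 * on (stashed in ops->private by TRACE_REG_PERF_ADD) and then submits a
 * struct ftrace_entry record through the perf trace buffers.
 */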
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

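/*
 * reg() callback of the function trace event: OPEN/CLOSE map to
 * registering and unregistering the per-event ftrace_ops, while ADD/DEL
 * only record the current CPU in ops->private and return 1 so that
 * perf_trace_add()/del() skip the default per-cpu hlist handling.
 */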
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */