/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))

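/*
 * Layout note: a plain probe records one address (vaddr[0] = probed
 * instruction pointer), a return probe records two (vaddr[0] = probed
 * function, vaddr[1] = return address), hence the "is_return ? 2 : 1"
 * above. Fetched argument data is appended right after this header.
 */

/*
 * Per-event perf filter state: the perf events attached to this probe
 * and the number of them that are system-wide rather than per-task.
 */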
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
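
/*
 * Note: a fault while reading the user stack is folded into the value 0
 * rather than reported as an error; the stack page may legitimately be
 * unreadable at the moment the probe fires.
 */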

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)	/* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
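
/*
 * Rough map from the fetch-arg syntax of uprobe_events (see
 * Documentation/trace/uprobetracer.txt) to the fetch methods above:
 *   %ax       - register fetch
 *   @ADDR     - fetch_memory, ADDR is a user virtual address
 *   @+OFFSET  - fetch_file_offset, OFFSET is relative to the probed file
 *               and is translated via translate_user_vaddr() at hit time
 *   $stackN   - fetch_stack, N-th word relative to the user stack pointer
 */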

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
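/*
 * For example (hypothetical path and offset), via tracefs:
 *   echo 'p:myprobe /bin/bash:0x4245c0 arg1=%ax' > uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' > uprobe_events
 *   echo '-:myprobe' > uprobe_events
 */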
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:", tu->filename);

	/* Don't print "0x (null)" when offset is 0 */
	if (tu->offset) {
		seq_printf(m, "0x%p", (void *)tu->offset);
	} else {
		switch (sizeof(void *)) {
		case 4:
			seq_printf(m, "0x00000000");
			break;
		case 8:
		default:
			seq_printf(m, "0x0000000000000000");
			break;
		}
	}

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

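/* Opening uprobe_events for write with O_TRUNC ("> uprobe_events") removes all probes. */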
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

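/*
 * One page of argument-staging buffer per possible CPU, created on the
 * first enable and torn down on the last disable; the refcount is
 * serialized by event_mutex (see the BUG_ON checks below).
 */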
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

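/*
 * A trace_uprobe is used either by ftrace (TP_FLAG_TRACE, through one or
 * more trace_event_files) or by perf (TP_FLAG_PROFILE), never both at
 * once; the -EINTR returns below enforce that exclusion.
 */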
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
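/*
 * Return true if the probe's breakpoint should be live in @mm: either a
 * system-wide perf counter is attached, or some per-task event targets a
 * task that uses this mm.
 */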
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif /* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

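/*
 * Handler invoked by the uprobes core when the breakpoint is hit. The
 * dispatch data is parked in current->utask->vaddr so that the
 * file_offset fetch functions can recover the breakpoint address while
 * the arguments are being decoded.
 */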
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);