/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

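/*
 * A return probe records both the probed function's address and the
 * return site, so its entry carries two vaddr slots; an entry probe
 * records only the instruction pointer.
 */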
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

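/*
 * The stack grows up or down depending on the architecture, so the
 * address of the n-th stack slot is computed accordingly.
 */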
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

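/*
 * Fetch the n-th entry of the user stack. A faulting access yields 0,
 * since a fetch function has no way to report the error.
 */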
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

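/*
 * Convert a file offset argument into a virtual address in the probed
 * task, based on the breakpoint address stashed in current->utask->vaddr
 * by the dispatcher.
 */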
static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
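/*
 * For example (the path and offset below are illustrative only):
 *
 *   echo 'p:bash_probe /bin/bash:0x4245c0 $stack0' > uprobe_events
 *   echo 'r:bash_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:bash_probe' >> uprobe_events
 */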
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("Probe point must have a file name.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

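/* Remove every registered probe; stops at the first event still in use. */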
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:", tu->filename);

	/* Don't print "0x  (null)" when offset is 0 */
	if (tu->offset) {
		seq_printf(m, "0x%p", (void *)tu->offset);
	} else {
		switch (sizeof(void *)) {
		case 4:
			seq_printf(m, "0x00000000");
			break;
		case 8:
		default:
			seq_printf(m, "0x0000000000000000");
			break;
		}
	}

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

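/*
 * Allocate one page per possible CPU to hold the fetched probe
 * arguments; on failure, free the pages allocated so far.
 */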
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

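/*
 * Write one entry into the ftrace ring buffer of @trace_file. For a
 * return probe the entry records both the probed function and the
 * return site.
 */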
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

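/*
 * Enable the probe either for ftrace (@file != NULL) or for perf. The
 * two modes are mutually exclusive, so -EINTR is returned when the
 * other one already owns the probe.
 */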
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

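/*
 * Disable the ftrace link for @file, or the perf side when @file is
 * NULL; the uprobe itself is unregistered once no user is left.
 */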
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

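/*
 * Drop @event from the filter. When nothing that matches the probed mm
 * remains, remove the breakpoints via uprobe_apply(..., false).
 */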
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

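/*
 * Hand one sample to perf. An attached BPF program may veto the sample;
 * unused tail space in the buffer is zeroed so no stale data is leaked
 * to user space.
 */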
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

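/*
 * Common breakpoint handler: stash the dispatch data for the fetch
 * functions, collect the probe arguments once into a per-cpu buffer,
 * then fan out to the ftrace and/or perf handlers.
 */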
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

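/* Return-probe counterpart of uprobe_dispatcher(). */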
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

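/* Wire up the trace_event_call and expose it in the events directory. */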
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);