// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

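/*
 * Entry layout: a return probe records two addresses in vaddr[] (function
 * entry and return site), a plain probe records only the probed instruction
 * pointer; the fetched arguments follow immediately after.
 */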
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

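/* Read the n-th word of the user stack; returns 0 if the access faults. */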
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

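/*
 * The probed file may be mapped anywhere in the target task: recover the
 * mapping base from the breakpoint address recorded at dispatch time, then
 * use it to turn a file offset into a virtual address.
 */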
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify the fetch insn code, since it does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/*
 * Uprobes with multiple reference counters are not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), then we allow the same
 * uprobe with a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	list_for_each_entry(tmp, &uprobe_list, list) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[(REF_CTR_OFFSET)] [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
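/*
 * For example (paths and offsets below are illustrative only):
 *
 *   p:myevent /bin/bash:0x4245c0 %ax	- probe with one register argument
 *   r:myevent /bin/bash:0x4245c0	- return probe at the same location
 *   -:myevent				- remove the event again
 */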
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename, *rctr, *rctr_end;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (rctr > rctr_end || *(rctr_end + 1) != 0) {
			ret = -EINVAL;
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
					is_return ? TPARG_FL_RETURN : 0);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

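/*
 * Each CPU gets one page to snapshot the fetched arguments before they are
 * copied into the trace or perf buffers; uprobe_buffer_refcnt counts how
 * many enabled events currently share these buffers.
 */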
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

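/*
 * Enable the probe either for ftrace (file != NULL) or for perf
 * (file == NULL, with a perf-specific filter); the two modes are
 * mutually exclusive on a given event.
 */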
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
					     tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

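/* Drop a perf event from the filter; retract the breakpoints if no consumer is left. */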
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

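/*
 * Common ->reg() callback: multiplexes registration requests coming from
 * ftrace and perf onto probe_event_enable()/probe_event_disable().
 */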
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

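/* Called by the uprobes core each time the probed instruction is hit. */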
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

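/* Called on return from the probed function for return probes. */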
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
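/*
 * Create a trace event for a single in-kernel consumer, bypassing the
 * uprobe_events text interface.
 */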
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to uprobe_list, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);