// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
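
/*
 * vaddr[] holds { ip } for an entry probe and { func, ret_ip } for a
 * return probe (see __uprobe_trace_func() below); the fetched argument
 * data is packed immediately after this header.
 */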

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

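/*
 * User stacks grow up or down depending on the architecture; pick the
 * direction for indexing the n-th word relative to the stack pointer.
 */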
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

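/*
 * FETCH_OP_FOFFS arguments are given as file offsets into the probed
 * binary.  The breakpoint address minus the probe's file offset is the
 * base the binary is mapped at, from which any file offset can be turned
 * into a virtual address in the current task (the dispatch data is
 * stashed in current->utask->vaddr by the dispatchers below).
 */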
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
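
/*
 * Only the first fetch stage is resolved here; dereferences, memory reads
 * and string fetches are handled by the generic process_fetch_insn_bottom()
 * from trace_probe_tmpl.h via the probe_mem_read()/fetch_store_string()
 * helpers above.
 */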

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	dyn_event_remove(&tu->devent);
	free_trace_uprobe(tu);
	return 0;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well.  There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * take a new reference counter, as long as the new one does not conflict
 * with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_probe_name(&new->tp),
				trace_probe_group_name(&new->tp));

	for_each_trace_uprobe(tmp, pos) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.\n");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

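/*
 * Per-CPU scratch buffers (one page each) for assembling the fetched
 * argument block before it is copied into the trace or perf buffers.  A
 * handler can migrate between uprobe_buffer_get() and uprobe_buffer_put(),
 * so each buffer is protected by its own mutex rather than by disabled
 * preemption.
 */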
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
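
/*
 * Sample output from the printer above, with hypothetical addresses and
 * one fetched argument:
 *
 *   entry probe:   myevent: (0x4245c0) arg1=0x1
 *   return probe:  myret: (0x4245c0 <- 0x4243f0) arg1=0x1
 */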
922
Oleg Nesterov31ba3342013-02-04 17:11:58 +0100923typedef bool (*filter_func_t)(struct uprobe_consumer *self,
924 enum uprobe_filter_ctx ctx,
925 struct mm_struct *mm);
926
927static int
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400928probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900929 filter_func_t filter)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530930{
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900931 bool enabled = trace_probe_is_enabled(&tu->tp);
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900932 int ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530933
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900934 if (file) {
Masami Hiramatsu747774d2019-06-01 00:17:37 +0900935 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
Oleg Nesterov48212542014-06-27 19:01:36 +0200936 return -EINTR;
937
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900938 ret = trace_probe_add_file(&tu->tp, file);
939 if (ret < 0)
940 return ret;
Oleg Nesterov48212542014-06-27 19:01:36 +0200941 } else {
Masami Hiramatsu747774d2019-06-01 00:17:37 +0900942 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
Oleg Nesterov48212542014-06-27 19:01:36 +0200943 return -EINTR;
944
Masami Hiramatsu747774d2019-06-01 00:17:37 +0900945 trace_probe_set_flag(&tu->tp, TP_FLAG_PROFILE);
Oleg Nesterov48212542014-06-27 19:01:36 +0200946 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530947
Oleg Nesterov736288b2013-02-03 20:58:35 +0100948 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
949
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900950 if (enabled)
951 return 0;
952
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200953 ret = uprobe_buffer_enable();
954 if (ret)
955 goto err_flags;
956
Oleg Nesterov31ba3342013-02-04 17:11:58 +0100957 tu->consumer.filter = filter;
Song Liu0c92c7a2018-04-23 10:21:34 -0700958 tu->inode = d_real_inode(tu->path.dentry);
Ravi Bangoria1cc33162018-08-20 10:12:47 +0530959 if (tu->ref_ctr_offset) {
960 ret = uprobe_register_refctr(tu->inode, tu->offset,
961 tu->ref_ctr_offset, &tu->consumer);
962 } else {
963 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
964 }
965
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200966 if (ret)
967 goto err_buffer;
Oleg Nesterov41618242013-01-27 18:36:24 +0100968
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200969 return 0;
970
971 err_buffer:
972 uprobe_buffer_disable();
973
974 err_flags:
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900975 if (file)
976 trace_probe_remove_file(&tu->tp, file);
977 else
Masami Hiramatsu747774d2019-06-01 00:17:37 +0900978 trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900979
Oleg Nesterov41618242013-01-27 18:36:24 +0100980 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530981}
982
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900983static void
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400984probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530985{
Namhyung Kim14577c32013-07-03 15:42:53 +0900986 if (!trace_probe_is_enabled(&tu->tp))
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530987 return;
988
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900989 if (file) {
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900990 if (trace_probe_remove_file(&tu->tp, file) < 0)
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900991 return;
992
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900993 if (trace_probe_is_enabled(&tu->tp))
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900994 return;
Masami Hiramatsub5f935e2019-06-01 00:17:26 +0900995 } else
Masami Hiramatsu747774d2019-06-01 00:17:37 +0900996 trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900997
Oleg Nesterov736288b2013-02-03 20:58:35 +0100998 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
999
Oleg Nesterova932b732013-01-31 19:47:23 +01001000 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
Song Liu0c92c7a2018-04-23 10:21:34 -07001001 tu->inode = NULL;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001002
1003 uprobe_buffer_disable();
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301004}
1005
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001006static int uprobe_event_define_fields(struct trace_event_call *event_call)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301007{
Masami Hiramatsueeb07b02018-04-25 21:17:05 +09001008 int ret, size;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301009 struct uprobe_trace_entry_head field;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001010 struct trace_uprobe *tu = event_call->data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301011
Oleg Nesterov4d1298e2013-03-30 19:23:15 +01001012 if (is_ret_probe(tu)) {
1013 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1014 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1015 size = SIZEOF_TRACE_ENTRY(true);
1016 } else {
1017 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1018 size = SIZEOF_TRACE_ENTRY(false);
1019 }
Namhyung Kim14577c32013-07-03 15:42:53 +09001020
Masami Hiramatsueeb07b02018-04-25 21:17:05 +09001021 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301022}
1023
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301024#ifdef CONFIG_PERF_EVENTS
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001025static bool
1026__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1027{
1028 struct perf_event *event;
1029
1030 if (filter->nr_systemwide)
1031 return true;
1032
1033 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001034 if (event->hw.target->mm == mm)
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001035 return true;
1036 }
1037
1038 return false;
1039}
1040
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001041static inline bool
1042uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1043{
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001044 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001045}
1046
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001047static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1048{
1049 bool done;
1050
1051 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001052 if (event->hw.target) {
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001053 list_del(&event->hw.tp_list);
1054 done = tu->filter.nr_systemwide ||
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001055 (event->hw.target->flags & PF_EXITING) ||
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001056 uprobe_filter_event(tu, event);
1057 } else {
1058 tu->filter.nr_systemwide--;
1059 done = tu->filter.nr_systemwide;
1060 }
1061 write_unlock(&tu->filter.rwlock);
1062
1063 if (!done)
Oleg Nesterov927d6872014-04-24 13:33:31 +02001064 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001065
1066 return 0;
1067}
1068
Oleg Nesterov736288b2013-02-03 20:58:35 +01001069static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1070{
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001071 bool done;
Oleg Nesterov927d6872014-04-24 13:33:31 +02001072 int err;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001073
Oleg Nesterov736288b2013-02-03 20:58:35 +01001074 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001075 if (event->hw.target) {
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001076 /*
1077 * event->parent != NULL means copy_process(), we can avoid
1078 * uprobe_apply(). current->mm must be probed and we can rely
1079 * on dup_mmap() which preserves the already installed bp's.
1080 *
1081 * attr.enable_on_exec means that exec/mmap will install the
1082 * breakpoints we need.
1083 */
1084 done = tu->filter.nr_systemwide ||
1085 event->parent || event->attr.enable_on_exec ||
1086 uprobe_filter_event(tu, event);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001087 list_add(&event->hw.tp_list, &tu->filter.perf_events);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001088 } else {
1089 done = tu->filter.nr_systemwide;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001090 tu->filter.nr_systemwide++;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001091 }
Oleg Nesterov736288b2013-02-03 20:58:35 +01001092 write_unlock(&tu->filter.rwlock);
1093
Oleg Nesterov927d6872014-04-24 13:33:31 +02001094 err = 0;
1095 if (!done) {
1096 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1097 if (err)
1098 uprobe_perf_close(tu, event);
1099 }
1100 return err;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001101}
1102
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001103static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1104 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1105{
1106 struct trace_uprobe *tu;
1107 int ret;
1108
1109 tu = container_of(uc, struct trace_uprobe, consumer);
1110 read_lock(&tu->filter.rwlock);
1111 ret = __uprobe_perf_filter(&tu->filter, mm);
1112 read_unlock(&tu->filter.rwlock);
1113
1114 return ret;
1115}
1116
Namhyung Kima43b9702014-01-17 17:08:36 +09001117static void __uprobe_perf_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001118 unsigned long func, struct pt_regs *regs,
1119 struct uprobe_cpu_buffer *ucb, int dsize)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301120{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001121 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301122 struct uprobe_trace_entry_head *entry;
1123 struct hlist_head *head;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001124 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001125 int size, esize;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001126 int rctx;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301127
Yonghong Songe87c6bc2017-10-23 23:53:08 -07001128 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
Wang Nan04a22fa2015-07-01 02:13:50 +00001129 return;
1130
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001131 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1132
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001133 size = esize + tu->tp.size + dsize;
1134 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1135 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1136 return;
1137
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301138 preempt_disable();
Oleg Nesterov515619f2013-04-13 15:36:49 +02001139 head = this_cpu_ptr(call->perf_events);
1140 if (hlist_empty(head))
1141 goto out;
1142
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001143 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301144 if (!entry)
1145 goto out;
1146
Oleg Nesterov393a7362013-03-30 18:46:22 +01001147 if (is_ret_probe(tu)) {
1148 entry->vaddr[0] = func;
Oleg Nesterov32520b22013-04-10 16:25:49 +02001149 entry->vaddr[1] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001150 data = DATAOF_TRACE_ENTRY(entry, true);
1151 } else {
Oleg Nesterov32520b22013-04-10 16:25:49 +02001152 entry->vaddr[0] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001153 data = DATAOF_TRACE_ENTRY(entry, false);
1154 }
1155
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001156 memcpy(data, ucb->buf, tu->tp.size + dsize);
Namhyung Kim14577c32013-07-03 15:42:53 +09001157
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001158 if (size - esize > tu->tp.size + dsize) {
1159 int len = tu->tp.size + dsize;
1160
1161 memset(data + len, 0, size - esize - len);
Namhyung Kim14577c32013-07-03 15:42:53 +09001162 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301163
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001164 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001165 head, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301166 out:
1167 preempt_enable();
Oleg Nesterova51cc602013-03-30 18:02:12 +01001168}
1169
1170/* uprobe profile handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001171static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1172 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +01001173{
1174 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1175 return UPROBE_HANDLER_REMOVE;
1176
Oleg Nesterov393a7362013-03-30 18:46:22 +01001177 if (!is_ret_probe(tu))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001178 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001179 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301180}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001181
1182static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001183 struct pt_regs *regs,
1184 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001185{
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001186 __uprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001187}
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001188
1189int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1190 const char **filename, u64 *probe_offset,
1191 bool perf_type_tracepoint)
1192{
1193 const char *pevent = trace_event_name(event->tp_event);
1194 const char *group = event->tp_event->class->system;
1195 struct trace_uprobe *tu;
1196
1197 if (perf_type_tracepoint)
1198 tu = find_probe_event(pevent, group);
1199 else
1200 tu = event->tp_event->data;
1201 if (!tu)
1202 return -EINVAL;
1203
1204 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1205 : BPF_FD_TYPE_UPROBE;
1206 *filename = tu->filename;
1207 *probe_offset = tu->offset;
1208 return 0;
1209}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301210#endif /* CONFIG_PERF_EVENTS */
1211
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001212static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001213trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001214 void *data)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301215{
Oleg Nesterov457d1772013-03-29 18:26:51 +01001216 struct trace_uprobe *tu = event->data;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001217 struct trace_event_file *file = data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301218
1219 switch (type) {
1220 case TRACE_REG_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001221 return probe_event_enable(tu, file, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301222
1223 case TRACE_REG_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001224 probe_event_disable(tu, file);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301225 return 0;
1226
1227#ifdef CONFIG_PERF_EVENTS
1228 case TRACE_REG_PERF_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001229 return probe_event_enable(tu, NULL, uprobe_perf_filter);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301230
1231 case TRACE_REG_PERF_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001232 probe_event_disable(tu, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301233 return 0;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001234
1235 case TRACE_REG_PERF_OPEN:
1236 return uprobe_perf_open(tu, data);
1237
1238 case TRACE_REG_PERF_CLOSE:
1239 return uprobe_perf_close(tu, data);
1240
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301241#endif
1242 default:
1243 return 0;
1244 }
1245 return 0;
1246}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	/* Stash the dispatch data where the fetch functions can find it. */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	/* Fan out to whichever of the ftrace and perf handlers are enabled. */
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	/* A nonzero ret (UPROBE_HANDLER_REMOVE) unhooks this consumer. */
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
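
/*
 * The return-probe variant is requested with an 'r' prefix in
 * uprobe_events (illustrative values, same caveats as the earlier
 * example):
 *
 *	# echo 'r:bash_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 */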

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe (%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
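
/*
 * This path backs the "uprobe"/"uretprobe" perf PMUs: instead of going
 * through uprobe_events, a perf_event_open(2) caller passes the binary
 * path and offset directly in the attr. A userspace sketch (illustrative
 * values; "uprobe_pmu_type" must be read from
 * /sys/bus/event_source/devices/uprobe/type, "target_pid" is assumed,
 * and error handling is omitted):
 *
 *	struct perf_event_attr attr = {
 *		.type = uprobe_pmu_type,
 *		.size = sizeof(attr),
 *		.uprobe_path = (__u64)(unsigned long)"/bin/bash",
 *		.probe_offset = 0x4245c0,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, target_pid, -1, -1, 0);
 */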

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);
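
/*
 * uprobe_profile is read-only; each line pairs the probed file and the
 * event name with its hit count (a sketch of the output, with
 * illustrative values):
 *
 *	# cat uprobe_profile
 *	  /bin/bash bash_probe                                      127
 */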