// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
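
/*
 * A worked example of the layout above: on a 64-bit kernel a return
 * probe entry carries two vaddr slots (function address and return
 * address), so SIZEOF_TRACE_ENTRY(true) is
 * sizeof(struct trace_entry) + 16, and DATAOF_TRACE_ENTRY() points
 * just past those slots, where the fetched argument data begins.
 */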

static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
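
/*
 * The u32 "data_loc" written above packs the copied length into the
 * high 16 bits and the offset of the string data (relative to @base)
 * into the low 16 bits; get_loc_len()/get_loc_data() decode the same
 * encoding that make_data_loc() produces.
 */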

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
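
/*
 * For instance, a probe on /bin/foo at offset 0x1000 with a reference
 * counter at 0x2000 (hypothetical values) only matches when argv[0]
 * reads exactly "/bin/foo:0x0000000000001000(0x2000)" on a 64-bit
 * kernel; any remaining argv entries are matched as fetch arguments.
 */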

static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * the comm of each argument.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that the argument index starts at 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to the existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}
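
/*
 * For example (all offsets hypothetical): with "p:ev1 /bin/foo:0x100(0x200)"
 * already registered, a later "p:ev2 /bin/foo:0x100(0x300)" is rejected
 * here with -EINVAL, while reusing the same counter offset, (0x200), is
 * allowed.
 */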

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
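 *
 * For example (binary path and offset are hypothetical):
 *   p:myprobes/myprobe /bin/bash:0x4245c0 arg1=%ax
 * places a probe in /bin/bash at file offset 0x4245c0 and records %ax
 * as "arg1" under the event name myprobes/myprobe.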
 */
static int __trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	enum probe_print_type ptype;
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is a %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i],
					is_return ? TPARG_FL_RETURN : 0);
		if (ret)
			goto error;
	}

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_uprobe_ops);

	ret = trace_uprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}
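
/*
 * probes_write() is what backs writes to the "uprobe_events" file in
 * tracefs, e.g. (event name, binary, and offset are hypothetical):
 *   echo 'p:myprobe /bin/bash:0x4245c0' >> /sys/kernel/tracing/uprobe_events
 * while "echo '-:myprobe' >> ..." removes the event again via
 * dyn_event_release() above.
 */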

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
1275
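/*
 * Detach a perf event from this call's shared filter; the breakpoints
 * are removed from every probe on the call only when no remaining
 * consumer still needs them.
 */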
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001276static int uprobe_perf_close(struct trace_event_call *call,
1277 struct perf_event *event)
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001278{
1279 struct trace_probe *pos, *tp;
1280 struct trace_uprobe *tu;
1281 int ret = 0;
1282
1283 tp = trace_probe_primary_from_call(call);
1284 if (WARN_ON_ONCE(!tp))
1285 return -ENODEV;
1286
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001287 tu = container_of(tp, struct trace_uprobe, tp);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001288 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001289 return 0;
1290
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001291 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1292 tu = container_of(pos, struct trace_uprobe, tp);
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001293 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001294 if (ret)
1295 break;
1296 }
1297
1298 return ret;
1299}
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001300
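/*
 * Attach a perf event to this call's shared filter and install the
 * breakpoints for every probe on the call, unless the filter reports
 * they are already (or will be) in place. Rolls back through
 * uprobe_perf_close() on failure.
 */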
1301static int uprobe_perf_open(struct trace_event_call *call,
1302 struct perf_event *event)
1303{
1304 struct trace_probe *pos, *tp;
1305 struct trace_uprobe *tu;
1306 int err = 0;
1307
1308 tp = trace_probe_primary_from_call(call);
1309 if (WARN_ON_ONCE(!tp))
1310 return -ENODEV;
1311
1312 tu = container_of(tp, struct trace_uprobe, tp);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001313 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001314 return 0;
1315
1316 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
Jiri Olsa1880ed72021-11-23 15:28:01 +01001317 tu = container_of(pos, struct trace_uprobe, tp);
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001318 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1319 if (err) {
1320 uprobe_perf_close(call, event);
1321 break;
1322 }
1323 }
1324
1325 return err;
1326}
1327
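/*
 * ->filter() callback of the uprobe consumer: decide whether @mm should
 * get the breakpoint. Takes filter->rwlock for read, staying consistent
 * with concurrent open/close updates, which take it for write.
 */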
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001328static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1329 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1330{
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001331 struct trace_uprobe_filter *filter;
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001332 struct trace_uprobe *tu;
1333 int ret;
1334
1335 tu = container_of(uc, struct trace_uprobe, consumer);
Masami Hiramatsub61387c2020-01-22 12:23:25 +09001336 filter = tu->tp.event->filter;
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001337
1338 read_lock(&filter->rwlock);
1339 ret = __uprobe_perf_filter(filter, mm);
1340 read_unlock(&filter->rwlock);
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001341
1342 return ret;
1343}
1344
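/*
 * Emit one perf sample: attached BPF programs run first and can drop
 * the event by returning 0; otherwise the probe args are copied into a
 * perf trace buffer and submitted.
 */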
Namhyung Kima43b9702014-01-17 17:08:36 +09001345static void __uprobe_perf_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001346 unsigned long func, struct pt_regs *regs,
1347 struct uprobe_cpu_buffer *ucb, int dsize)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301348{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001349 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301350 struct uprobe_trace_entry_head *entry;
1351 struct hlist_head *head;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001352 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001353 int size, esize;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001354 int rctx;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301355
Alexei Starovoitov70ed0702020-02-24 11:27:15 -08001356 if (bpf_prog_array_valid(call)) {
1357 u32 ret;
1358
1359 preempt_disable();
1360 ret = trace_call_bpf(call, regs);
1361 preempt_enable();
1362 if (!ret)
1363 return;
1364 }
Wang Nan04a22fa2015-07-01 02:13:50 +00001365
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001366 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1367
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001368 size = esize + tu->tp.size + dsize;
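	/* keep the u32 size header plus payload u64-aligned in the ring buffer */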
1369 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1370 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1371 return;
1372
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301373 preempt_disable();
Oleg Nesterov515619f2013-04-13 15:36:49 +02001374 head = this_cpu_ptr(call->perf_events);
1375 if (hlist_empty(head))
1376 goto out;
1377
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001378 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301379 if (!entry)
1380 goto out;
1381
Oleg Nesterov393a7362013-03-30 18:46:22 +01001382 if (is_ret_probe(tu)) {
1383 entry->vaddr[0] = func;
Oleg Nesterov32520b22013-04-10 16:25:49 +02001384 entry->vaddr[1] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001385 data = DATAOF_TRACE_ENTRY(entry, true);
1386 } else {
Oleg Nesterov32520b22013-04-10 16:25:49 +02001387 entry->vaddr[0] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001388 data = DATAOF_TRACE_ENTRY(entry, false);
1389 }
1390
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001391 memcpy(data, ucb->buf, tu->tp.size + dsize);
Namhyung Kim14577c32013-07-03 15:42:53 +09001392
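	/* zero the alignment padding so stale buffer bytes never reach userspace */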
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001393 if (size - esize > tu->tp.size + dsize) {
1394 int len = tu->tp.size + dsize;
1395
1396 memset(data + len, 0, size - esize - len);
Namhyung Kim14577c32013-07-03 15:42:53 +09001397 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301398
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001399 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001400 head, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301401 out:
1402 preempt_enable();
Oleg Nesterova51cc602013-03-30 18:02:12 +01001403}
1404
1405/* uprobe profile handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001406static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1407 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +01001408{
1409 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1410 return UPROBE_HANDLER_REMOVE;
1411
Oleg Nesterov393a7362013-03-30 18:46:22 +01001412 if (!is_ret_probe(tu))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001413 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001414 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301415}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001416
1417static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001418 struct pt_regs *regs,
1419 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001420{
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001421 __uprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001422}
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001423
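/*
 * Map a perf event back to its uprobe definition; backs the
 * BPF_TASK_FD_QUERY path for both tracepoint-type and perf-type
 * uprobe events.
 */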
1424int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1425 const char **filename, u64 *probe_offset,
1426 bool perf_type_tracepoint)
1427{
1428 const char *pevent = trace_event_name(event->tp_event);
1429 const char *group = event->tp_event->class->system;
1430 struct trace_uprobe *tu;
1431
1432 if (perf_type_tracepoint)
1433 tu = find_probe_event(pevent, group);
1434 else
Jean-Philippe Brucker22d5bd62020-06-08 14:45:32 +02001435 tu = trace_uprobe_primary_from_call(event->tp_event);
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001436 if (!tu)
1437 return -EINVAL;
1438
1439 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1440 : BPF_FD_TYPE_UPROBE;
1441 *filename = tu->filename;
1442 *probe_offset = tu->offset;
1443 return 0;
1444}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301445#endif /* CONFIG_PERF_EVENTS */
1446
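/*
 * Single ->reg() callback multiplexing ftrace and perf
 * (un)registration plus per-event perf open/close for this call.
 */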
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001447static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001448trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001449 void *data)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301450{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001451 struct trace_event_file *file = data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301452
1453 switch (type) {
1454 case TRACE_REG_REGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001455 return probe_event_enable(event, file, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301456
1457 case TRACE_REG_UNREGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001458 probe_event_disable(event, file);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301459 return 0;
1460
1461#ifdef CONFIG_PERF_EVENTS
1462 case TRACE_REG_PERF_REGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001463 return probe_event_enable(event, NULL, uprobe_perf_filter);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301464
1465 case TRACE_REG_PERF_UNREGISTER:
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001466 probe_event_disable(event, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301467 return 0;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001468
1469 case TRACE_REG_PERF_OPEN:
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001470 return uprobe_perf_open(event, data);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001471
1472 case TRACE_REG_PERF_CLOSE:
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001473 return uprobe_perf_close(event, data);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001474
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301475#endif
1476 default:
1477 return 0;
1478 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301479}
1480
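/*
 * Breakpoint handler: snapshots the probe arguments once, then fans the
 * hit out to the ftrace and/or perf handlers according to the probe's
 * enabled flags.
 */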
1481static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1482{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301483 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001484 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001485 struct uprobe_cpu_buffer *ucb;
1486 int dsize, esize;
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001487 int ret = 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301488
Oleg Nesterova932b732013-01-31 19:47:23 +01001490 tu = container_of(con, struct trace_uprobe, consumer);
Oleg Nesterov1b47aef2013-01-31 19:55:27 +01001491 tu->nhit++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301492
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001493 udd.tu = tu;
1494 udd.bp_addr = instruction_pointer(regs);
1495
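	/* stash the dispatch data where the fetch code can retrieve it */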
1496 current->utask->vaddr = (unsigned long) &udd;
1497
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001498 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1499 return 0;
1500
1501 dsize = __get_data_size(&tu->tp, regs);
1502 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1503
1504 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001505 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001506
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001507 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001508 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301509
1510#ifdef CONFIG_PERF_EVENTS
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001511 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001512 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301513#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001514 uprobe_buffer_put(ucb);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001515 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301516}
1517
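/* Return-probe counterpart of uprobe_dispatcher(). */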
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001518static int uretprobe_dispatcher(struct uprobe_consumer *con,
1519 unsigned long func, struct pt_regs *regs)
1520{
1521 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001522 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001523 struct uprobe_cpu_buffer *ucb;
1524 int dsize, esize;
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001525
1526 tu = container_of(con, struct trace_uprobe, consumer);
1527
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001528 udd.tu = tu;
1529 udd.bp_addr = func;
1530
1531 current->utask->vaddr = (unsigned long) &udd;
1532
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001533 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1534 return 0;
1535
1536 dsize = __get_data_size(&tu->tp, regs);
1537 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1538
1539 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001540 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001541
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001542 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001543 uretprobe_trace_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001544
1545#ifdef CONFIG_PERF_EVENTS
Masami Hiramatsu747774d2019-06-01 00:17:37 +09001546 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001547 uretprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001548#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001549 uprobe_buffer_put(ucb);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001550 return 0;
1551}
1552
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301553static struct trace_event_functions uprobe_funcs = {
1554 .trace = print_uprobe_event
1555};
1556
Peter Zijlstra04ae87a2019-10-24 22:26:59 +02001557static struct trace_event_fields uprobe_fields_array[] = {
1558 { .type = TRACE_FUNCTION_TYPE,
1559 .define_fields = uprobe_event_define_fields },
1560 {}
1561};
1562
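/* Event-call setup shared by tracefs-visible and local (perf-only) events. */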
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001563static inline void init_trace_event_call(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301564{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001565 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301566 call->event.funcs = &uprobe_funcs;
Peter Zijlstra04ae87a2019-10-24 22:26:59 +02001567 call->class->fields_array = uprobe_fields_array;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301568
Song Liu9fd2e482019-05-07 09:15:45 -07001569 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
Song Liu33ea4b22017-12-06 14:45:16 -08001570 call->class->reg = trace_uprobe_register;
Song Liu33ea4b22017-12-06 14:45:16 -08001571}
1572
1573static int register_uprobe_event(struct trace_uprobe *tu)
1574{
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001575 init_trace_event_call(tu);
Song Liu33ea4b22017-12-06 14:45:16 -08001576
Masami Hiramatsu46e53762019-06-01 00:17:16 +09001577 return trace_probe_register_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301578}
1579
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001580static int unregister_uprobe_event(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301581{
Masami Hiramatsu46e53762019-06-01 00:17:16 +09001582 return trace_probe_unregister_event_call(&tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301583}
1584
Song Liu33ea4b22017-12-06 14:45:16 -08001585#ifdef CONFIG_PERF_EVENTS
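/*
 * Create an event that is never added to dyn_event or tracefs; the
 * perf_event_open() uprobe path is the expected caller. A minimal
 * sketch of a call site (hypothetical path and offset):
 *
 *	struct trace_event_call *call;
 *
 *	call = create_local_trace_uprobe("/bin/true", 0x1234, 0, false);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 */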
1586struct trace_event_call *
Song Liua6ca88b2018-10-01 22:36:36 -07001587create_local_trace_uprobe(char *name, unsigned long offs,
1588 unsigned long ref_ctr_offset, bool is_return)
Song Liu33ea4b22017-12-06 14:45:16 -08001589{
Steven Rostedt (VMware)007517a2021-08-19 00:13:27 -04001590 enum probe_print_type ptype;
Song Liu33ea4b22017-12-06 14:45:16 -08001591 struct trace_uprobe *tu;
Song Liu33ea4b22017-12-06 14:45:16 -08001592 struct path path;
1593 int ret;
1594
1595 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1596 if (ret)
1597 return ERR_PTR(ret);
1598
Song Liu0c92c7a2018-04-23 10:21:34 -07001599 if (!d_is_reg(path.dentry)) {
1600 path_put(&path);
Song Liu33ea4b22017-12-06 14:45:16 -08001601 return ERR_PTR(-EINVAL);
1602 }
1603
1604 /*
Masami Hiramatsu0597c492018-11-05 18:03:04 +09001605 * local trace_uprobes are not added to dyn_event, so they are never
Song Liu33ea4b22017-12-06 14:45:16 -08001606 * searched in find_probe_event(). Therefore, there is no concern of
1607 * duplicated name "DUMMY_EVENT" here.
1608 */
1609 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1610 is_return);
1611
1612 if (IS_ERR(tu)) {
1613 pr_info("Failed to allocate trace_uprobe (%d)\n",
1614 (int)PTR_ERR(tu));
Song Liu0c92c7a2018-04-23 10:21:34 -07001615 path_put(&path);
Song Liu33ea4b22017-12-06 14:45:16 -08001616 return ERR_CAST(tu);
1617 }
1618
1619 tu->offset = offs;
Song Liu0c92c7a2018-04-23 10:21:34 -07001620 tu->path = path;
Song Liua6ca88b2018-10-01 22:36:36 -07001621 tu->ref_ctr_offset = ref_ctr_offset;
Song Liu33ea4b22017-12-06 14:45:16 -08001622 tu->filename = kstrdup(name, GFP_KERNEL);
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001623 init_trace_event_call(tu);
Song Liu33ea4b22017-12-06 14:45:16 -08001624
Steven Rostedt (VMware)007517a2021-08-19 00:13:27 -04001625 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1626 if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
Song Liu33ea4b22017-12-06 14:45:16 -08001627 ret = -ENOMEM;
1628 goto error;
1629 }
1630
Masami Hiramatsue3dc9f82019-06-01 00:17:57 +09001631 return trace_probe_event_call(&tu->tp);
Song Liu33ea4b22017-12-06 14:45:16 -08001632error:
1633 free_trace_uprobe(tu);
1634 return ERR_PTR(ret);
1635}
1636
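/* Tear down an event obtained from create_local_trace_uprobe(). */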
1637void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1638{
1639 struct trace_uprobe *tu;
1640
Masami Hiramatsu60d53e22019-06-20 00:07:20 +09001641 tu = trace_uprobe_primary_from_call(event_call);
Song Liu33ea4b22017-12-06 14:45:16 -08001642
Song Liu33ea4b22017-12-06 14:45:16 -08001643 free_trace_uprobe(tu);
1644}
1645#endif /* CONFIG_PERF_EVENTS */
1646
Bhaskar Chowdhury39bcdd62021-01-12 10:20:08 +05301647/* Make a trace interface for controlling probe points */
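/*
 * A minimal usage sketch, assuming tracefs is mounted at
 * /sys/kernel/tracing and using a hypothetical probe address:
 *
 *	echo 'p:myprobe /bin/bash:0x4245c0' >> uprobe_events
 *	echo 1 > events/uprobes/myprobe/enable
 *	cat trace
 */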
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301648static __init int init_uprobe_trace(void)
1649{
Masami Hiramatsu0597c492018-11-05 18:03:04 +09001650 int ret;
1651
1652 ret = dyn_event_register(&trace_uprobe_ops);
1653 if (ret)
1654 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301655
Wei Yang22c36b12020-07-12 09:10:36 +08001656 ret = tracing_init_dentry();
1657 if (ret)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301658 return 0;
1659
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001660 trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301661 NULL, &uprobe_events_ops);
1662 /* Profile interface */
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001663 trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301664 NULL, &uprobe_profile_ops);
1665 return 0;
1666}
1667
1668fs_initcall(init_uprobe_trace);