// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

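/*
 * Layout sketch for the macros above (assuming a 64-bit kernel, so
 * sizeof(unsigned long) == 8): a 'p' event records one vaddr (the probed
 * address) ahead of the fetched argument data, while an 'r' event records
 * two -- vaddr[0] for the probed function and vaddr[1] for the address it
 * returned to.
 */
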
static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

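/*
 * Usage sketch (find_probe_event() below does exactly this): walk every
 * registered uprobe event, typically under event_mutex:
 *
 *	struct dyn_event *pos;
 *	struct trace_uprobe *tu;
 *
 *	for_each_trace_uprobe(tu, pos)
 *		pr_info("%s\n", tu->filename);
 */
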
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

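/*
 * Worked example: with CONFIG_STACK_GROWSUP unset (e.g. x86-64, where the
 * stack grows down and sizeof(long) == 8), get_user_stack_nth(regs, 2)
 * reads the unsigned long at sp + 16; a faulting user access quietly
 * yields 0 rather than an error.
 */
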
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

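/*
 * Worked example of the data_loc encoding handled above: make_data_loc()
 * (see trace_probe.h) packs the copied length into the upper 16 bits of
 * the u32 and the offset from the entry base into the lower 16 bits, so a
 * 6-byte string stored 24 bytes into the record is encoded as
 * (6 << 16) | 24.
 */
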
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

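/*
 * I.e., with the breakpoint hit at bp_addr for a probe placed at file
 * offset tu->offset, (bp_addr - offset) recovers where the object is
 * mapped, so a file-offset fetcharg ("@+OFFSET") resolves to mapping base
 * plus that offset (a sketch; it assumes the probed region is mapped
 * contiguously).
 */
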
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

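/*
 * E.g. (illustrative values): a probe on "/bin/bash" at offset 0x4710
 * with ref_ctr_offset 0x10 matches only an argv[0] of the exact form
 * "/bin/bash:0x0000000000004710(0x10)" on a 64-bit kernel, after which
 * the remaining argv entries are compared argument by argument.
 */
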
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that the arguments start at command index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * take a new reference counter as long as it does not conflict with any
 * other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
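/*
 * For example (paths, offsets and fetchargs are illustrative only), via
 * the tracefs uprobe_events file (typically
 * /sys/kernel/tracing/uprobe_events):
 *
 *   echo 'p:myprobe /bin/bash:0x4710 %ax' >> uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4710 $retval' >> uprobe_events
 *   echo 'p /bin/bash:0x4710(0x10)' >> uprobe_events
 *
 * where "(0x10)" is a reference counter (semaphore) offset and a
 * "%return" suffix on the offset is an alternative way to ask for a
 * return probe.
 */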
static int __trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	enum probe_print_type ptype;
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM, otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i],
					is_return ? TPARG_FL_RETURN : 0);
		if (ret)
			goto error;
	}

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

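/*
 * E.g. "p /bin/bash:0x4710" with no explicit name above lands in group
 * "uprobes" as event "p_bash_0x4710" (the basename is truncated at the
 * first '.', '-' or '_').
 */
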
int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_uprobe_ops);

	ret = trace_uprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

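/*
 * E.g. the listing for the probes created earlier would look like
 * (illustrative):
 *
 *   p:uprobes/myprobe /bin/bash:0x0000000000004710 arg1=%ax
 */
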
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

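/*
 * Typical pairing (a sketch of how the uprobe/uretprobe dispatchers use
 * this; store_trace_args() is from trace_probe_tmpl.h):
 *
 *	ucb = uprobe_buffer_get();
 *	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
 *	...
 *	uprobe_buffer_put(ucb);
 */
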
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

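/*
 * Rendered output then looks like (illustrative addresses):
 *
 *   myprobe: (0x4245c0) arg1=0x2a
 *   myretprobe: (0x42e7a1 <- 0x4245c0) arg1=0x2a
 */
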
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

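/*
 * Note: nr_systemwide counts perf events opened without a specific target
 * task; any such event makes every mm pass the filter, otherwise only the
 * mms of tasks with an attached perf event pass.
 */
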
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001215static inline bool
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001216trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1217 struct perf_event *event)
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001218{
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001219 return __uprobe_perf_filter(filter, event->hw.target->mm);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001220}
1221
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001222static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1223 struct perf_event *event)
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001224{
1225 bool done;
1226
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001227 write_lock(&filter->rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001228 if (event->hw.target) {
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001229 list_del(&event->hw.tp_list);
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001230 done = filter->nr_systemwide ||
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001231 (event->hw.target->flags & PF_EXITING) ||
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001232 trace_uprobe_filter_event(filter, event);
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001233 } else {
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001234 filter->nr_systemwide--;
1235 done = filter->nr_systemwide;
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001236 }
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001237 write_unlock(&filter->rwlock);
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001238
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001239 return done;
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001240}
1241
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001242/* This returns true if the filter always covers target mm */
1243static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1244 struct perf_event *event)
Oleg Nesterov736288b2013-02-03 20:58:35 +01001245{
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001246 bool done;
1247
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001248 write_lock(&filter->rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001249 if (event->hw.target) {
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001250 /*
1251 * event->parent != NULL means copy_process(), we can avoid
1252 * uprobe_apply(). current->mm must be probed and we can rely
1253 * on dup_mmap() which preserves the already installed bp's.
1254 *
1255 * attr.enable_on_exec means that exec/mmap will install the
1256 * breakpoints we need.
1257 */
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001258 done = filter->nr_systemwide ||
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001259 event->parent || event->attr.enable_on_exec ||
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001260 trace_uprobe_filter_event(filter, event);
1261 list_add(&event->hw.tp_list, &filter->perf_events);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001262 } else {
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001263 done = filter->nr_systemwide;
1264 filter->nr_systemwide++;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001265 }
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001266 write_unlock(&filter->rwlock);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001267
Masami Hiramatsu99c9a9232020-01-10 10:45:39 +09001268 return done;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001269}

static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		/* apply each probe on the list, not just the primary one */
		tu = container_of(pos, struct trace_uprobe, tp);
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}
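
/*
 * Note (a sketch of the design): every trace_uprobe attached to the same
 * event shares one trace_uprobe_filter, so only the open that actually
 * widens coverage walks the probe list and installs breakpoints. On a
 * partial failure, the unwind goes through uprobe_perf_close(), which
 * walks the same list removing whatever was applied.
 */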

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}
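
/*
 * Note: uprobe_perf_filter() is the function passed as the filter
 * argument to probe_event_enable() in trace_uprobe_register() below, so
 * the uprobes core can consult it per-mm before inserting a breakpoint
 * into a given process.
 */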

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		u32 ret;

		preempt_disable();
		ret = trace_call_bpf(call, regs);
		preempt_enable();
		if (!ret)
			return;
	}

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
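
/*
 * Worked sizing example (illustrative numbers): suppose
 * esize + tp.size + dsize == 42 on a 64-bit kernel. Then
 * ALIGN(42 + 4, 8) - 4 == 44, i.e. the record plus perf's u32 size
 * header ends on a u64 boundary, leaving two bytes of tail padding.
 * The memset() above zeroes exactly that padding so stale bytes from
 * the reused per-cpu buffer never leak to userspace.
 */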

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
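
/*
 * Note: this helper serves the BPF_TASK_FD_QUERY syscall command
 * (reached, as far as the callers suggest, via the perf-event query
 * code in kernel/trace/bpf_trace.c), letting tooling recover which
 * file and offset a perf uprobe fd is attached to.
 */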
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
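
/*
 * Lifecycle sketch (a reading of the switch above, not authoritative):
 * enabling the event file from ftrace maps to TRACE_REG_REGISTER and
 * UNREGISTER, while the perf side first goes through PERF_REGISTER,
 * which installs uprobe_perf_filter, and then PERF_OPEN/PERF_CLOSE per
 * perf_event, which drive the reference-counting filter above.
 */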

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
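
/*
 * Note: uprobe_dispatcher() is the single consumer callback for
 * breakpoint hits. The probe arguments are fetched from user memory
 * once into a per-cpu buffer and then fanned out to the ftrace and
 * perf handlers, so an event enabled for both does not decode twice.
 */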

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	enum probe_print_type ptype;
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	if (!tu->filename) {
		/* kstrdup() can fail; don't register a probe with no filename */
		ret = -ENOMEM;
		goto error;
	}
	init_trace_event_call(tu);

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
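
/*
 * Note: "local" events bypass the uprobe_events text interface; they
 * back perf_event_open() on the perf_uprobe PMU (see perf_uprobe_init()
 * in kernel/trace/trace_event_perf.c), which is why the dummy name
 * never shows up in tracefs.
 */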

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", 0644, NULL,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, NULL,
			  NULL, &uprobe_profile_ops);
	return 0;
}
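
/*
 * Example use of the files created above (a sketch; the binary, offset
 * and fetch args are illustrative, tracefs assumed mounted at
 * /sys/kernel/tracing):
 *
 *   # cd /sys/kernel/tracing
 *   # echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' >> uprobe_events
 *   # echo 1 > events/uprobes/myprobe/enable
 *   # cat trace
 *   # cat uprobe_profile	# filename, event name, hit count (nhit)
 */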

fs_initcall(init_uprobe_trace);